diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bac487c65627a9..b00919e4c8728d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -18,3 +18,4 @@ be/src/io/* @platoneko @gavinchou @dataroaring be/src/agent/be_exec_version_manager.cpp @BiteTheDDDDt fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java @dataroaring @CalvinKirs @morningman **/pom.xml @CalvinKirs @morningman +fe/fe-common/src/main/java/org/apache/doris/common/FeMetaVersion.java @dataroaring @morningman @yiguolei @xiaokang diff --git a/.github/workflows/sonarcloud.yml b/.github/workflows/sonarcloud.yml index 5ee8bdf24e95f9..e2eb4b14d43161 100644 --- a/.github/workflows/sonarcloud.yml +++ b/.github/workflows/sonarcloud.yml @@ -41,11 +41,11 @@ jobs: - 'fe/**' - 'gensrc/proto/**' - 'gensrc/thrift/**' - - name: Set up JDK 11 + - name: Set up JDK 17 if: ${{ steps.filter.outputs.fe_changes == 'true' }} uses: actions/setup-java@v3 with: - java-version: 11 + java-version: 17 distribution: 'adopt' - name: Cache SonarCloud packages if: ${{ steps.filter.outputs.fe_changes == 'true' }} diff --git a/be/src/agent/be_exec_version_manager.cpp b/be/src/agent/be_exec_version_manager.cpp index 32cbe569892e6a..e44829ae39b8a0 100644 --- a/be/src/agent/be_exec_version_manager.cpp +++ b/be/src/agent/be_exec_version_manager.cpp @@ -17,12 +17,9 @@ #include "agent/be_exec_version_manager.h" -namespace doris { +#include "common/exception.h" -const std::map> AGGREGATION_CHANGE_MAP = { - {AGGREGATION_2_1_VERSION, - {"window_funnel", "stddev_samp", "variance_samp", "percentile_approx_weighted", - "percentile_approx", "covar_samp", "percentile", "percentile_array"}}}; +namespace doris { Status BeExecVersionManager::check_be_exec_version(int be_exec_version) { if (be_exec_version > max_be_exec_version || be_exec_version < min_be_exec_version) { @@ -35,19 +32,35 @@ Status BeExecVersionManager::check_be_exec_version(int be_exec_version) { return Status::OK(); } -void BeExecVersionManager::check_agg_state_compatibility(int current_be_exec_version, - int data_be_exec_version, - std::string function_name) { - if (current_be_exec_version > AGGREGATION_2_1_VERSION && - data_be_exec_version <= AGGREGATION_2_1_VERSION && - AGGREGATION_CHANGE_MAP.find(AGGREGATION_2_1_VERSION)->second.contains(function_name)) { - throw Exception(Status::InternalError( - "agg state data with {} is not supported, " - "current_be_exec_version={}, data_be_exec_version={}, need to rebuild the data " - "or set the be_exec_version={} in fe.conf", - function_name, current_be_exec_version, data_be_exec_version, - AGGREGATION_2_1_VERSION)); +int BeExecVersionManager::get_function_compatibility(int be_exec_version, + std::string function_name) { + auto it = _function_change_map.find(function_name); + if (it == _function_change_map.end()) { + // 0 means no compatibility issues need to be dealt with + return 0; + } + + auto version_it = it->second.lower_bound(be_exec_version); + if (version_it == it->second.end()) { + return 0; + } + + return *version_it; +} + +void BeExecVersionManager::check_function_compatibility(int current_be_exec_version, + int data_be_exec_version, + std::string function_name) { + if (get_function_compatibility(current_be_exec_version, function_name) == + get_function_compatibility(data_be_exec_version, function_name)) { + return; } + + throw Exception(Status::InternalError( + "agg state data with {} is not supported, " + "current_be_exec_version={}, data_be_exec_version={}, need to rebuild the data " + "or set the be_exec_version={} in fe.conf 
temporary", function_name, current_be_exec_version, data_be_exec_version, data_be_exec_version)); } /** @@ -88,5 +101,5 @@ void BeExecVersionManager::check_agg_state_compatibility(int current_be_exec_ver */ const int BeExecVersionManager::max_be_exec_version = 7; const int BeExecVersionManager::min_be_exec_version = 0; - +std::map<std::string, std::set<int>> BeExecVersionManager::_function_change_map {}; } // namespace doris diff --git a/be/src/agent/be_exec_version_manager.h b/be/src/agent/be_exec_version_manager.h index 16092197a3aa10..7ab3c7de23ae64 100644 --- a/be/src/agent/be_exec_version_manager.h +++ b/be/src/agent/be_exec_version_manager.h @@ -25,13 +25,14 @@ namespace doris { +constexpr static int AGG_FUNCTION_NEW_WINDOW_FUNNEL = 6; constexpr inline int BITMAP_SERDE = 3; constexpr inline int USE_NEW_SERDE = 4; // release on DORIS version 2.1 constexpr inline int OLD_WAL_SERDE = 3; // use to solve compatibility issues, see pr #32299 constexpr inline int AGG_FUNCTION_NULLABLE = 5; // change some agg nullable property: PR #37215 constexpr inline int VARIANT_SERDE = 6; // change variant serde to fix PR #38413 constexpr inline int AGGREGATION_2_1_VERSION = - 5; // some aggregation changed the data format after this version + 6; // some aggregation changed the data format after this version class BeExecVersionManager { public: @@ -39,14 +40,28 @@ class BeExecVersionManager { static Status check_be_exec_version(int be_exec_version); - static void check_agg_state_compatibility(int current_be_exec_version, int data_be_exec_version, - std::string function_name); + static int get_function_compatibility(int be_exec_version, std::string function_name); + + static void check_function_compatibility(int current_be_exec_version, int data_be_exec_version, + std::string function_name); static int get_newest_version() { return max_be_exec_version; } + static std::string get_function_suffix(int be_exec_version) { + return "_for_old_version_" + std::to_string(be_exec_version); + } + + // For example, there are incompatible changes between version=7 and version=6, at this time breaking_old_version is 6. + static void registe_old_function_compatibility(int breaking_old_version, + std::string function_name) { + _function_change_map[function_name].insert(breaking_old_version); + } + private: static const int max_be_exec_version; static const int min_be_exec_version; + // [function name] -> [breaking change start version] + static std::map<std::string, std::set<int>> _function_change_map; }; } // namespace doris diff --git a/be/src/agent/heartbeat_server.cpp b/be/src/agent/heartbeat_server.cpp index e981e9165b5e1d..562950b54d4621 100644 --- a/be/src/agent/heartbeat_server.cpp +++ b/be/src/agent/heartbeat_server.cpp @@ -246,36 +246,43 @@ Status HeartbeatServer::_heartbeat(const TMasterInfo& master_info) { } if (master_info.__isset.meta_service_endpoint != config::is_cloud_mode()) { - return Status::InvalidArgument( + LOG(WARNING) << "Detected mismatch in cloud mode configuration between FE and BE. " + << "FE cloud mode: " + << (master_info.__isset.meta_service_endpoint ? "true" : "false") + << ", BE cloud mode: " << (config::is_cloud_mode() ?
"true" : "false"); + return Status::InvalidArgument( "fe and be do not work in same mode, fe cloud mode: {}," " be cloud mode: {}", master_info.__isset.meta_service_endpoint, config::is_cloud_mode()); } - if (master_info.__isset.meta_service_endpoint && config::meta_service_endpoint.empty() && - !master_info.meta_service_endpoint.empty()) { - auto st = config::set_config("meta_service_endpoint", master_info.meta_service_endpoint, - true); - LOG(INFO) << "set config meta_service_endpoing " << master_info.meta_service_endpoint << " " - << st; - } - - if (master_info.__isset.cloud_instance_id) { - if (!config::cloud_instance_id.empty() && - config::cloud_instance_id != master_info.cloud_instance_id) { - return Status::InvalidArgument( - "cloud_instance_id in fe.conf and be.conf are not same, fe: {}, be: {}", - master_info.cloud_instance_id, config::cloud_instance_id); + if (master_info.__isset.meta_service_endpoint) { + if (config::meta_service_endpoint.empty() && !master_info.meta_service_endpoint.empty()) { + auto st = config::set_config("meta_service_endpoint", master_info.meta_service_endpoint, + true); + LOG(INFO) << "set config meta_service_endpoing " << master_info.meta_service_endpoint + << " " << st; } - if (config::cloud_instance_id.empty() && !master_info.cloud_instance_id.empty()) { - auto st = config::set_config("cloud_instance_id", master_info.cloud_instance_id, true); - config::set_cloud_unique_id(master_info.cloud_instance_id); - LOG(INFO) << "set config cloud_instance_id " << master_info.cloud_instance_id << " " - << st; + if (master_info.meta_service_endpoint != config::meta_service_endpoint) { + LOG(WARNING) << "Detected mismatch in meta_service_endpoint configuration between FE " + "and BE. " + << "FE meta_service_endpoint: " << master_info.meta_service_endpoint + << ", BE meta_service_endpoint: " << config::meta_service_endpoint; + return Status::InvalidArgument( + "fe and be do not work in same mode, fe meta_service_endpoint: {}," + " be meta_service_endpoint: {}", + master_info.meta_service_endpoint, config::meta_service_endpoint); } } + if (master_info.__isset.cloud_unique_id && + config::cloud_unique_id != master_info.cloud_unique_id && + config::enable_use_cloud_unique_id_from_fe) { + auto st = config::set_config("cloud_unique_id", master_info.cloud_unique_id, true); + LOG(INFO) << "set config cloud_unique_id " << master_info.cloud_unique_id << " " << st; + } + return Status::OK(); } diff --git a/be/src/cloud/cloud_base_compaction.cpp b/be/src/cloud/cloud_base_compaction.cpp index 8cf1131695f38c..e9753cebe82c18 100644 --- a/be/src/cloud/cloud_base_compaction.cpp +++ b/be/src/cloud/cloud_base_compaction.cpp @@ -321,7 +321,7 @@ Status CloudBaseCompaction::modify_rowsets() { std::numeric_limits::max(); RETURN_IF_ERROR(cloud_tablet()->calc_delete_bitmap_for_compaction( _input_rowsets, _output_rowset, _rowid_conversion, compaction_type(), - _stats.merged_rows, initiator, output_rowset_delete_bitmap, + _stats.merged_rows, _stats.filtered_rows, initiator, output_rowset_delete_bitmap, _allow_delete_in_cumu_compaction)); LOG_INFO("update delete bitmap in CloudBaseCompaction, tablet_id={}, range=[{}-{}]", _tablet->tablet_id(), _input_rowsets.front()->start_version(), diff --git a/be/src/cloud/cloud_cumulative_compaction.cpp b/be/src/cloud/cloud_cumulative_compaction.cpp index 29cfe412fa89d8..ea5fa7cc340158 100644 --- a/be/src/cloud/cloud_cumulative_compaction.cpp +++ b/be/src/cloud/cloud_cumulative_compaction.cpp @@ -18,6 +18,7 @@ #include "cloud/cloud_cumulative_compaction.h" 
#include "cloud/cloud_meta_mgr.h" +#include "cloud/cloud_tablet_mgr.h" #include "cloud/config.h" #include "common/config.h" #include "common/logging.h" @@ -27,6 +28,7 @@ #include "olap/compaction.h" #include "olap/cumulative_compaction_policy.h" #include "service/backend_options.h" +#include "util/debug_points.h" #include "util/trace.h" #include "util/uuid_generator.h" @@ -254,13 +256,13 @@ Status CloudCumulativeCompaction::modify_rowsets() { compaction_job->add_output_rowset_ids(_output_rowset->rowset_id().to_string()); DeleteBitmapPtr output_rowset_delete_bitmap = nullptr; + int64_t initiator = + HashUtil::hash64(_uuid.data(), _uuid.size(), 0) & std::numeric_limits<int64_t>::max(); if (_tablet->keys_type() == KeysType::UNIQUE_KEYS && _tablet->enable_unique_key_merge_on_write()) { - int64_t initiator = HashUtil::hash64(_uuid.data(), _uuid.size(), 0) & - std::numeric_limits<int64_t>::max(); RETURN_IF_ERROR(cloud_tablet()->calc_delete_bitmap_for_compaction( _input_rowsets, _output_rowset, _rowid_conversion, compaction_type(), - _stats.merged_rows, initiator, output_rowset_delete_bitmap, + _stats.merged_rows, _stats.filtered_rows, initiator, output_rowset_delete_bitmap, _allow_delete_in_cumu_compaction)); LOG_INFO("update delete bitmap in CloudCumulativeCompaction, tablet_id={}, range=[{}-{}]", _tablet->tablet_id(), _input_rowsets.front()->start_version(), @@ -340,9 +342,88 @@ Status CloudCumulativeCompaction::modify_rowsets() { stats.num_rows(), stats.data_size()); } } + if (_tablet->keys_type() == KeysType::UNIQUE_KEYS && + _tablet->enable_unique_key_merge_on_write() && _input_rowsets.size() != 1) { + process_old_version_delete_bitmap(); + } return Status::OK(); } +void CloudCumulativeCompaction::process_old_version_delete_bitmap() { + // agg previously rowset old version delete bitmap + std::vector<RowsetSharedPtr> pre_rowsets {}; + std::vector<std::string> pre_rowset_ids {}; + for (const auto& it : cloud_tablet()->rowset_map()) { + if (it.first.second < _input_rowsets.front()->start_version()) { + pre_rowsets.emplace_back(it.second); + pre_rowset_ids.emplace_back(it.second->rowset_id().to_string()); + } + } + std::sort(pre_rowsets.begin(), pre_rowsets.end(), Rowset::comparator); + if (!pre_rowsets.empty()) { + auto pre_max_version = _output_rowset->version().second; + DeleteBitmapPtr new_delete_bitmap = + std::make_shared<DeleteBitmap>(_tablet->tablet_meta()->tablet_id()); + std::vector<std::tuple<int64_t, DeleteBitmap::BitmapKey, DeleteBitmap::BitmapKey>> + to_remove_vec; + for (auto& rowset : pre_rowsets) { + if (rowset->rowset_meta()->total_disk_size() == 0) { + continue; + } + for (uint32_t seg_id = 0; seg_id < rowset->num_segments(); ++seg_id) { + rowset->rowset_id().to_string(); + DeleteBitmap::BitmapKey start {rowset->rowset_id(), seg_id, 0}; + DeleteBitmap::BitmapKey end {rowset->rowset_id(), seg_id, pre_max_version}; + DeleteBitmap::BitmapKey before_end {rowset->rowset_id(), seg_id, + pre_max_version - 1}; + auto d = _tablet->tablet_meta()->delete_bitmap().get_agg( + {rowset->rowset_id(), seg_id, pre_max_version}); + to_remove_vec.emplace_back( + std::make_tuple(_tablet->tablet_id(), start, before_end)); + if (d->isEmpty()) { + continue; + } + new_delete_bitmap->set(end, *d); + } + } + if (!new_delete_bitmap->empty()) { + // store agg delete bitmap + Status update_st; + DBUG_EXECUTE_IF("CloudCumulativeCompaction.modify_rowsets.update_delete_bitmap_failed", + { + update_st = Status::InternalError( + "test fail to update delete bitmap for tablet_id {}", + cloud_tablet()->tablet_id()); + }); + if (update_st.ok()) { + update_st = _engine.meta_mgr().update_delete_bitmap_without_lock( + *cloud_tablet(),
new_delete_bitmap.get()); + } + if (!update_st.ok()) { + std::stringstream ss; + ss << "failed to update delete bitmap for tablet=" << cloud_tablet()->tablet_id() + << " st=" << update_st.to_string(); + std::string msg = ss.str(); + LOG(WARNING) << msg; + } else { + Version version(_input_rowsets.front()->start_version(), + _input_rowsets.back()->end_version()); + for (auto it = new_delete_bitmap->delete_bitmap.begin(); + it != new_delete_bitmap->delete_bitmap.end(); it++) { + _tablet->tablet_meta()->delete_bitmap().set(it->first, it->second); + } + _tablet->tablet_meta()->delete_bitmap().add_to_remove_queue(version.to_string(), + to_remove_vec); + DBUG_EXECUTE_IF( + "CloudCumulativeCompaction.modify_rowsets.delete_expired_stale_rowsets", { + static_cast(_tablet.get()) + ->delete_expired_stale_rowsets(); + }); + } + } + } +} + void CloudCumulativeCompaction::garbage_collection() { CloudCompactionMixin::garbage_collection(); cloud::TabletJobInfoPB job; diff --git a/be/src/cloud/cloud_cumulative_compaction.h b/be/src/cloud/cloud_cumulative_compaction.h index f353d0f51891fb..62c7cb44ea5bf5 100644 --- a/be/src/cloud/cloud_cumulative_compaction.h +++ b/be/src/cloud/cloud_cumulative_compaction.h @@ -47,6 +47,8 @@ class CloudCumulativeCompaction : public CloudCompactionMixin { void update_cumulative_point(); + void process_old_version_delete_bitmap(); + ReaderType compaction_type() const override { return ReaderType::READER_CUMULATIVE_COMPACTION; } std::string _uuid; diff --git a/be/src/cloud/cloud_cumulative_compaction_policy.cpp b/be/src/cloud/cloud_cumulative_compaction_policy.cpp index b8c4ee20cb2077..f9af469e56f60a 100644 --- a/be/src/cloud/cloud_cumulative_compaction_policy.cpp +++ b/be/src/cloud/cloud_cumulative_compaction_policy.cpp @@ -34,12 +34,11 @@ namespace doris { CloudSizeBasedCumulativeCompactionPolicy::CloudSizeBasedCumulativeCompactionPolicy( int64_t promotion_size, double promotion_ratio, int64_t promotion_min_size, - int64_t compaction_min_size, int64_t promotion_version_count) + int64_t compaction_min_size) : _promotion_size(promotion_size), _promotion_ratio(promotion_ratio), _promotion_min_size(promotion_min_size), - _compaction_min_size(compaction_min_size), - _promotion_version_count(promotion_version_count) {} + _compaction_min_size(compaction_min_size) {} int64_t CloudSizeBasedCumulativeCompactionPolicy::_level_size(const int64_t size) { if (size < 1024) return 0; @@ -205,7 +204,7 @@ int64_t CloudSizeBasedCumulativeCompactionPolicy::new_cumulative_point( // consider it's version count here. bool satisfy_promotion_version = tablet->enable_unique_key_merge_on_write() && output_rowset->end_version() - output_rowset->start_version() > - _promotion_version_count; + config::compaction_promotion_version_count; // if rowsets have delete version, move to the last directly. // if rowsets have no delete version, check output_rowset total disk size satisfies promotion size. 
return output_rowset->start_version() == last_cumulative_point && diff --git a/be/src/cloud/cloud_cumulative_compaction_policy.h b/be/src/cloud/cloud_cumulative_compaction_policy.h index 66068e9f3be186..c142a8a6d3dffe 100644 --- a/be/src/cloud/cloud_cumulative_compaction_policy.h +++ b/be/src/cloud/cloud_cumulative_compaction_policy.h @@ -59,8 +59,7 @@ class CloudSizeBasedCumulativeCompactionPolicy : public CloudCumulativeCompactio int64_t promotion_size = config::compaction_promotion_size_mbytes * 1024 * 1024, double promotion_ratio = config::compaction_promotion_ratio, int64_t promotion_min_size = config::compaction_promotion_min_size_mbytes * 1024 * 1024, - int64_t compaction_min_size = config::compaction_min_size_mbytes * 1024 * 1024, - int64_t promotion_version_count = config::compaction_promotion_version_count); + int64_t compaction_min_size = config::compaction_min_size_mbytes * 1024 * 1024); ~CloudSizeBasedCumulativeCompactionPolicy() override = default; @@ -94,8 +93,6 @@ class CloudSizeBasedCumulativeCompactionPolicy : public CloudCumulativeCompactio int64_t _promotion_min_size; /// lower bound size to do compaction compaction. int64_t _compaction_min_size; - // cumulative compaction promotion version count, only works for unique key MoW table - int64_t _promotion_version_count; }; class CloudTimeSeriesCumulativeCompactionPolicy : public CloudCumulativeCompactionPolicy { diff --git a/be/src/cloud/cloud_delete_bitmap_action.cpp b/be/src/cloud/cloud_delete_bitmap_action.cpp new file mode 100644 index 00000000000000..60db5896dfab8a --- /dev/null +++ b/be/src/cloud/cloud_delete_bitmap_action.cpp @@ -0,0 +1,127 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "cloud_delete_bitmap_action.h" + +#include +#include +#include +#include +#include + +#include // IWYU pragma: keep +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cloud/cloud_tablet.h" +#include "cloud/cloud_tablet_mgr.h" +#include "common/logging.h" +#include "common/status.h" +#include "gutil/strings/substitute.h" +#include "http/http_channel.h" +#include "http/http_headers.h" +#include "http/http_request.h" +#include "http/http_status.h" +#include "olap/olap_define.h" +#include "olap/storage_engine.h" +#include "olap/tablet_manager.h" +#include "util/doris_metrics.h" +#include "util/stopwatch.hpp" + +namespace doris { +using namespace ErrorCode; + +namespace { + +constexpr std::string_view HEADER_JSON = "application/json"; + +} // namespace + +CloudDeleteBitmapAction::CloudDeleteBitmapAction(DeleteBitmapActionType ctype, ExecEnv* exec_env, + CloudStorageEngine& engine, + TPrivilegeHier::type hier, + TPrivilegeType::type ptype) + : HttpHandlerWithAuth(exec_env, hier, ptype), + _engine(engine), + _delete_bitmap_action_type(ctype) {} + +static Status _check_param(HttpRequest* req, uint64_t* tablet_id) { + const auto& req_tablet_id = req->param(TABLET_ID_KEY); + if (req_tablet_id.empty()) { + return Status::InternalError("tablet id is empty!"); + } + try { + *tablet_id = std::stoull(req_tablet_id); + } catch (const std::exception& e) { + return Status::InternalError("convert tablet_id failed, {}", e.what()); + } + return Status::OK(); +} + +Status CloudDeleteBitmapAction::_handle_show_delete_bitmap_count(HttpRequest* req, + std::string* json_result) { + uint64_t tablet_id = 0; + // check & retrieve tablet_id from req if it contains + RETURN_NOT_OK_STATUS_WITH_WARN(_check_param(req, &tablet_id), "check param failed"); + if (tablet_id == 0) { + return Status::InternalError("check param failed: missing tablet_id"); + } + + CloudTabletSPtr tablet = DORIS_TRY(_engine.tablet_mgr().get_tablet(tablet_id)); + if (tablet == nullptr) { + return Status::NotFound("Tablet not found. 
tablet_id={}", tablet_id); + } + + auto count = tablet->tablet_meta()->delete_bitmap().get_delete_bitmap_count(); + auto cardinality = tablet->tablet_meta()->delete_bitmap().cardinality(); + auto size = tablet->tablet_meta()->delete_bitmap().get_size(); + + rapidjson::Document root; + root.SetObject(); + root.AddMember("delete_bitmap_count", count, root.GetAllocator()); + root.AddMember("cardinality", cardinality, root.GetAllocator()); + root.AddMember("size", size, root.GetAllocator()); + + // to json string + rapidjson::StringBuffer strbuf; + rapidjson::PrettyWriter writer(strbuf); + root.Accept(writer); + *json_result = std::string(strbuf.GetString()); + + return Status::OK(); +} + +void CloudDeleteBitmapAction::handle(HttpRequest* req) { + req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.data()); + if (_delete_bitmap_action_type == DeleteBitmapActionType::COUNT_INFO) { + std::string json_result; + Status st = _handle_show_delete_bitmap_count(req, &json_result); + if (!st.ok()) { + HttpChannel::send_reply(req, HttpStatus::OK, st.to_json()); + } else { + HttpChannel::send_reply(req, HttpStatus::OK, json_result); + } + } +} + +} // namespace doris \ No newline at end of file diff --git a/be/src/cloud/cloud_delete_bitmap_action.h b/be/src/cloud/cloud_delete_bitmap_action.h new file mode 100644 index 00000000000000..9321661374c195 --- /dev/null +++ b/be/src/cloud/cloud_delete_bitmap_action.h @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include + +#include "cloud/cloud_storage_engine.h" +#include "common/status.h" +#include "http/http_handler_with_auth.h" +#include "olap/tablet.h" + +namespace doris { +class HttpRequest; + +class ExecEnv; + +enum class DeleteBitmapActionType { COUNT_INFO = 1 }; + +/// This action is used for viewing the delete bitmap status +class CloudDeleteBitmapAction : public HttpHandlerWithAuth { +public: + CloudDeleteBitmapAction(DeleteBitmapActionType ctype, ExecEnv* exec_env, + CloudStorageEngine& engine, TPrivilegeHier::type hier, + TPrivilegeType::type ptype); + + ~CloudDeleteBitmapAction() override = default; + + void handle(HttpRequest* req) override; + +private: + Status _handle_show_delete_bitmap_count(HttpRequest* req, std::string* json_result); + +private: + CloudStorageEngine& _engine; + DeleteBitmapActionType _delete_bitmap_action_type; +}; +} // namespace doris \ No newline at end of file diff --git a/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp b/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp index 33b9e51c7cbcf2..6abc3958650ef6 100644 --- a/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp +++ b/be/src/cloud/cloud_engine_calc_delete_bitmap_task.cpp @@ -87,6 +87,7 @@ Status CloudEngineCalcDeleteBitmapTask::execute() { LOG(WARNING) << "handle calc delete bitmap fail, st=" << st.to_string(); } }); + VLOG_DEBUG << "submit TabletCalcDeleteBitmapTask for tablet=" << tablet_id; if (!submit_st.ok()) { _res = submit_st; break; @@ -95,6 +96,7 @@ Status CloudEngineCalcDeleteBitmapTask::execute() { } // wait for all finished token->wait(); + DBUG_EXECUTE_IF("CloudEngineCalcDeleteBitmapTask.execute.enable_wait", { sleep(3); }); LOG(INFO) << "finish to calculate delete bitmap on transaction." << "transaction_id=" << transaction_id << ", cost(us): " << watch.get_elapse_time_us() @@ -125,6 +127,7 @@ void CloudTabletCalcDeleteBitmapTask::set_compaction_stats(int64_t ms_base_compa } Status CloudTabletCalcDeleteBitmapTask::handle() const { + VLOG_DEBUG << "start calculate delete bitmap on tablet " << _tablet_id; SCOPED_ATTACH_TASK(_mem_tracker); int64_t t1 = MonotonicMicros(); auto base_tablet = DORIS_TRY(_engine.get_tablet(_tablet_id)); @@ -174,8 +177,12 @@ Status CloudTabletCalcDeleteBitmapTask::handle() const { auto sync_rowset_time_us = MonotonicMicros() - t2; max_version = tablet->max_version_unlocked(); if (_version != max_version + 1) { - LOG(WARNING) << "version not continuous, current max version=" << max_version - << ", request_version=" << _version << " tablet_id=" << _tablet_id; + bool need_log = (config::publish_version_gap_logging_threshold < 0 || + max_version + config::publish_version_gap_logging_threshold >= _version); + if (need_log) { + LOG(WARNING) << "version not continuous, current max version=" << max_version + << ", request_version=" << _version << " tablet_id=" << _tablet_id; + } auto error_st = Status::Error("version not continuous"); _engine_calc_delete_bitmap_task->add_error_tablet_id(_tablet_id, error_st); diff --git a/be/src/cloud/cloud_meta_mgr.cpp b/be/src/cloud/cloud_meta_mgr.cpp index 36217ddb61de7d..071c15d11e5516 100644 --- a/be/src/cloud/cloud_meta_mgr.cpp +++ b/be/src/cloud/cloud_meta_mgr.cpp @@ -290,6 +290,8 @@ static std::string debug_info(const Request& req) { return ""; } else if constexpr (is_any_v) { return fmt::format(" tablet_id={}", req.rowset_meta().tablet_id()); + } else if constexpr (is_any_v) { + return fmt::format(" tablet_id={}", req.tablet_id()); } else { static_assert(!sizeof(Request)); } @@ 
-1046,16 +1048,43 @@ Status CloudMetaMgr::update_delete_bitmap(const CloudTablet& tablet, int64_t loc return st; } +Status CloudMetaMgr::update_delete_bitmap_without_lock(const CloudTablet& tablet, + DeleteBitmap* delete_bitmap) { + VLOG_DEBUG << "update_delete_bitmap_without_lock , tablet_id: " << tablet.tablet_id(); + UpdateDeleteBitmapRequest req; + UpdateDeleteBitmapResponse res; + req.set_cloud_unique_id(config::cloud_unique_id); + req.set_table_id(tablet.table_id()); + req.set_partition_id(tablet.partition_id()); + req.set_tablet_id(tablet.tablet_id()); + // use a fake lock id to resolve compatibility issues + req.set_lock_id(-3); + req.set_unlock(true); + for (auto& [key, bitmap] : delete_bitmap->delete_bitmap) { + req.add_rowset_ids(std::get<0>(key).to_string()); + req.add_segment_ids(std::get<1>(key)); + req.add_versions(std::get<2>(key)); + // To save space, convert array and bitmap containers to run containers + bitmap.runOptimize(); + std::string bitmap_data(bitmap.getSizeInBytes(), '\0'); + bitmap.write(bitmap_data.data()); + *(req.add_segment_delete_bitmaps()) = std::move(bitmap_data); + } + return retry_rpc("update delete bitmap", req, &res, &MetaService_Stub::update_delete_bitmap); +} + Status CloudMetaMgr::get_delete_bitmap_update_lock(const CloudTablet& tablet, int64_t lock_id, int64_t initiator) { - VLOG_DEBUG << "get_delete_bitmap_update_lock , tablet_id: " << tablet.tablet_id(); + VLOG_DEBUG << "get_delete_bitmap_update_lock , tablet_id: " << tablet.tablet_id() + << ",lock_id:" << lock_id; GetDeleteBitmapUpdateLockRequest req; GetDeleteBitmapUpdateLockResponse res; req.set_cloud_unique_id(config::cloud_unique_id); req.set_table_id(tablet.table_id()); req.set_lock_id(lock_id); req.set_initiator(initiator); - req.set_expiration(10); // 10s expiration time for compaction and schema_change + // set expiration time for compaction and schema_change + req.set_expiration(config::delete_bitmap_lock_expiration_seconds); int retry_times = 0; Status st; std::default_random_engine rng = make_random_engine(); @@ -1076,4 +1105,22 @@ Status CloudMetaMgr::get_delete_bitmap_update_lock(const CloudTablet& tablet, in return st; } +Status CloudMetaMgr::remove_old_version_delete_bitmap( + int64_t tablet_id, + const std::vector>& to_delete) { + LOG(INFO) << "remove_old_version_delete_bitmap , tablet_id: " << tablet_id; + RemoveDeleteBitmapRequest req; + RemoveDeleteBitmapResponse res; + req.set_cloud_unique_id(config::cloud_unique_id); + req.set_tablet_id(tablet_id); + for (auto& value : to_delete) { + req.add_rowset_ids(std::get<0>(value)); + req.add_begin_versions(std::get<1>(value)); + req.add_end_versions(std::get<2>(value)); + } + auto st = retry_rpc("remove old delete bitmap", req, &res, + &MetaService_Stub::remove_delete_bitmap); + return st; +} + } // namespace doris::cloud diff --git a/be/src/cloud/cloud_meta_mgr.h b/be/src/cloud/cloud_meta_mgr.h index 2f776b056866aa..79cdb3fd3d1f8c 100644 --- a/be/src/cloud/cloud_meta_mgr.h +++ b/be/src/cloud/cloud_meta_mgr.h @@ -95,9 +95,16 @@ class CloudMetaMgr { Status update_delete_bitmap(const CloudTablet& tablet, int64_t lock_id, int64_t initiator, DeleteBitmap* delete_bitmap); + Status update_delete_bitmap_without_lock(const CloudTablet& tablet, + DeleteBitmap* delete_bitmap); + Status get_delete_bitmap_update_lock(const CloudTablet& tablet, int64_t lock_id, int64_t initiator); + Status remove_old_version_delete_bitmap( + int64_t tablet_id, + const std::vector>& to_delete); + private: bool sync_tablet_delete_bitmap_by_cache(CloudTablet* 
tablet, int64_t old_max_version, std::ranges::range auto&& rs_metas, diff --git a/be/src/cloud/cloud_storage_engine.cpp b/be/src/cloud/cloud_storage_engine.cpp index 1cf6e2ac039828..4f452656a6236b 100644 --- a/be/src/cloud/cloud_storage_engine.cpp +++ b/be/src/cloud/cloud_storage_engine.cpp @@ -257,7 +257,7 @@ Status CloudStorageEngine::start_bg_threads() { // add calculate tablet delete bitmap task thread pool RETURN_IF_ERROR(ThreadPoolBuilder("TabletCalDeleteBitmapThreadPool") - .set_min_threads(1) + .set_min_threads(config::calc_tablet_delete_bitmap_task_max_thread) .set_max_threads(config::calc_tablet_delete_bitmap_task_max_thread) .build(&_calc_tablet_delete_bitmap_task_thread_pool)); diff --git a/be/src/cloud/cloud_storage_engine.h b/be/src/cloud/cloud_storage_engine.h index d3a55c3c377276..92d2917a916f6a 100644 --- a/be/src/cloud/cloud_storage_engine.h +++ b/be/src/cloud/cloud_storage_engine.h @@ -76,16 +76,12 @@ class CloudStorageEngine final : public BaseStorageEngine { std::optional get_storage_resource(const std::string& vault_id) { LOG(INFO) << "Getting storage resource for vault_id: " << vault_id; - if (vault_id.empty()) { - if (latest_fs() == nullptr) { - LOG(INFO) << "there is not latest fs"; - return std::nullopt; - } - return StorageResource {latest_fs()}; - } bool synced = false; do { + if (vault_id.empty() && latest_fs() != nullptr) { + return StorageResource {latest_fs()}; + } if (auto storage_resource = doris::get_storage_resource(vault_id); storage_resource) { return storage_resource->first; } diff --git a/be/src/cloud/cloud_tablet.cpp b/be/src/cloud/cloud_tablet.cpp index 7433b781c65d99..06f7e97e0c475d 100644 --- a/be/src/cloud/cloud_tablet.cpp +++ b/be/src/cloud/cloud_tablet.cpp @@ -96,6 +96,7 @@ Status CloudTablet::capture_rs_readers(const Version& spec_version, auto missed_versions = get_missed_versions(spec_version.second); if (missed_versions.empty()) { st.set_code(VERSION_ALREADY_MERGED); // Reset error code + st.append(" versions are already compacted, "); } st.append(" tablet_id=" + std::to_string(tablet_id())); // clang-format off @@ -363,6 +364,7 @@ int CloudTablet::delete_expired_stale_rowsets() { std::vector expired_rowsets; int64_t expired_stale_sweep_endtime = ::time(nullptr) - config::tablet_rowset_stale_sweep_time_sec; + std::vector version_to_delete; { std::unique_lock wlock(_meta_lock); @@ -375,6 +377,8 @@ int CloudTablet::delete_expired_stale_rowsets() { } for (int64_t path_id : path_ids) { + int start_version = -1; + int end_version = -1; // delete stale versions in version graph auto version_path = _timestamped_version_tracker.fetch_and_delete_path_by_id(path_id); for (auto& v_ts : version_path->timestamped_versions()) { @@ -389,12 +393,18 @@ int CloudTablet::delete_expired_stale_rowsets() { DCHECK(false) << [this, &wlock]() { wlock.unlock(); std::string json; get_compaction_status(&json); return json; }(); // clang-format on } + if (start_version < 0) { + start_version = v_ts->version().first; + } + end_version = v_ts->version().second; _tablet_meta->delete_stale_rs_meta_by_version(v_ts->version()); - VLOG_DEBUG << "delete stale rowset " << v_ts->version(); } + Version version(start_version, end_version); + version_to_delete.emplace_back(version.to_string()); } _reconstruct_version_tracker_if_necessary(); } + _tablet_meta->delete_bitmap().remove_stale_delete_bitmap_from_queue(version_to_delete); recycle_cached_data(expired_rowsets); return expired_rowsets.size(); } @@ -658,8 +668,8 @@ Status CloudTablet::save_delete_bitmap(const TabletTxnInfo* 
txn_info, int64_t tx RowsetSharedPtr rowset = txn_info->rowset; int64_t cur_version = rowset->start_version(); // update delete bitmap info, in order to avoid recalculation when trying again - _engine.txn_delete_bitmap_cache().update_tablet_txn_info( - txn_id, tablet_id(), delete_bitmap, cur_rowset_ids, PublishStatus::PREPARE); + RETURN_IF_ERROR(_engine.txn_delete_bitmap_cache().update_tablet_txn_info( + txn_id, tablet_id(), delete_bitmap, cur_rowset_ids, PublishStatus::PREPARE)); if (txn_info->partial_update_info && txn_info->partial_update_info->is_partial_update && rowset_writer->num_rows() > 0) { @@ -684,9 +694,9 @@ Status CloudTablet::save_delete_bitmap(const TabletTxnInfo* txn_info, int64_t tx // store the delete bitmap with sentinel marks in txn_delete_bitmap_cache because if the txn is retried for some reason, // it will use the delete bitmap from txn_delete_bitmap_cache when re-calculating the delete bitmap, during which it will do // delete bitmap correctness check. If we store the new_delete_bitmap, the delete bitmap correctness check will fail - _engine.txn_delete_bitmap_cache().update_tablet_txn_info(txn_id, tablet_id(), delete_bitmap, - cur_rowset_ids, PublishStatus::SUCCEED, - txn_info->publish_info); + RETURN_IF_ERROR(_engine.txn_delete_bitmap_cache().update_tablet_txn_info( + txn_id, tablet_id(), delete_bitmap, cur_rowset_ids, PublishStatus::SUCCEED, + txn_info->publish_info)); return Status::OK(); } @@ -724,7 +734,7 @@ Versions CloudTablet::calc_missed_versions(int64_t spec_version, Versions existi Status CloudTablet::calc_delete_bitmap_for_compaction( const std::vector& input_rowsets, const RowsetSharedPtr& output_rowset, const RowIdConversion& rowid_conversion, ReaderType compaction_type, int64_t merged_rows, - int64_t initiator, DeleteBitmapPtr& output_rowset_delete_bitmap, + int64_t filtered_rows, int64_t initiator, DeleteBitmapPtr& output_rowset_delete_bitmap, bool allow_delete_in_cumu_compaction) { output_rowset_delete_bitmap = std::make_shared(tablet_id()); std::set missed_rows; @@ -740,11 +750,12 @@ Status CloudTablet::calc_delete_bitmap_for_compaction( if (!allow_delete_in_cumu_compaction) { if (compaction_type == ReaderType::READER_CUMULATIVE_COMPACTION && tablet_state() == TABLET_RUNNING) { - if (merged_rows >= 0 && merged_rows != missed_rows_size) { + if (merged_rows + filtered_rows >= 0 && + merged_rows + filtered_rows != missed_rows_size) { std::string err_msg = fmt::format( - "cumulative compaction: the merged rows({}) is not equal to missed " - "rows({}) in rowid conversion, tablet_id: {}, table_id:{}", - merged_rows, missed_rows_size, tablet_id(), table_id()); + "cumulative compaction: the merged rows({}), the filtered rows({}) is not " + "equal to missed rows({}) in rowid conversion, tablet_id: {}, table_id:{}", + merged_rows, filtered_rows, missed_rows_size, tablet_id(), table_id()); if (config::enable_mow_compaction_correctness_check_core) { CHECK(false) << err_msg; } else { diff --git a/be/src/cloud/cloud_tablet.h b/be/src/cloud/cloud_tablet.h index 2bd1ce475028ab..53747dc19e27de 100644 --- a/be/src/cloud/cloud_tablet.h +++ b/be/src/cloud/cloud_tablet.h @@ -176,7 +176,7 @@ class CloudTablet final : public BaseTablet { const RowsetSharedPtr& output_rowset, const RowIdConversion& rowid_conversion, ReaderType compaction_type, int64_t merged_rows, - int64_t initiator, + int64_t filtered_rows, int64_t initiator, DeleteBitmapPtr& output_rowset_delete_bitmap, bool allow_delete_in_cumu_compaction); diff --git a/be/src/cloud/cloud_tablet_mgr.cpp 
b/be/src/cloud/cloud_tablet_mgr.cpp index 0fe050d02dbd3d..e5c31785c1eb1c 100644 --- a/be/src/cloud/cloud_tablet_mgr.cpp +++ b/be/src/cloud/cloud_tablet_mgr.cpp @@ -136,7 +136,7 @@ class CloudTabletMgr::TabletMap { CloudTabletMgr::CloudTabletMgr(CloudStorageEngine& engine) : _engine(engine), _tablet_map(std::make_unique()), - _cache(std::make_unique( + _cache(std::make_unique( CachePolicy::CacheType::CLOUD_TABLET_CACHE, config::tablet_cache_capacity, LRUCacheType::NUMBER, 0, config::tablet_cache_shards)) {} diff --git a/be/src/cloud/cloud_txn_delete_bitmap_cache.cpp b/be/src/cloud/cloud_txn_delete_bitmap_cache.cpp index c6a3b54edc3f67..63a21bc0714beb 100644 --- a/be/src/cloud/cloud_txn_delete_bitmap_cache.cpp +++ b/be/src/cloud/cloud_txn_delete_bitmap_cache.cpp @@ -23,6 +23,7 @@ #include #include +#include "cloud/config.h" #include "common/status.h" #include "cpp/sync_point.h" #include "olap/olap_common.h" @@ -32,8 +33,8 @@ namespace doris { CloudTxnDeleteBitmapCache::CloudTxnDeleteBitmapCache(size_t size_in_bytes) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::CLOUD_TXN_DELETE_BITMAP_CACHE, - size_in_bytes, LRUCacheType::SIZE, 86400, 4), + : LRUCachePolicy(CachePolicy::CacheType::CLOUD_TXN_DELETE_BITMAP_CACHE, size_in_bytes, + LRUCacheType::SIZE, 86400, 4), _stop_latch(1) {} CloudTxnDeleteBitmapCache::~CloudTxnDeleteBitmapCache() { @@ -119,12 +120,11 @@ void CloudTxnDeleteBitmapCache::set_tablet_txn_info( TTransactionId transaction_id, int64_t tablet_id, DeleteBitmapPtr delete_bitmap, const RowsetIdUnorderedSet& rowset_ids, RowsetSharedPtr rowset, int64_t txn_expiration, std::shared_ptr partial_update_info) { - if (txn_expiration <= 0) { - txn_expiration = duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count() + - 120; - } + int64_t txn_expiration_min = + duration_cast(std::chrono::system_clock::now().time_since_epoch()) + .count() + + config::tablet_txn_info_min_expired_seconds; + txn_expiration = std::max(txn_expiration_min, txn_expiration); { std::unique_lock wlock(_rwlock); TxnKey txn_key(transaction_id, tablet_id); @@ -153,16 +153,21 @@ void CloudTxnDeleteBitmapCache::set_tablet_txn_info( .tag("delete_bitmap_size", charge); } -void CloudTxnDeleteBitmapCache::update_tablet_txn_info(TTransactionId transaction_id, - int64_t tablet_id, - DeleteBitmapPtr delete_bitmap, - const RowsetIdUnorderedSet& rowset_ids, - PublishStatus publish_status, - TxnPublishInfo publish_info) { +Status CloudTxnDeleteBitmapCache::update_tablet_txn_info(TTransactionId transaction_id, + int64_t tablet_id, + DeleteBitmapPtr delete_bitmap, + const RowsetIdUnorderedSet& rowset_ids, + PublishStatus publish_status, + TxnPublishInfo publish_info) { { std::unique_lock wlock(_rwlock); TxnKey txn_key(transaction_id, tablet_id); - CHECK(_txn_map.contains(txn_key)); + if (!_txn_map.contains(txn_key)) { + return Status::Error( + "not found txn info, tablet_id={}, transaction_id={}, may be expired and be " + "removed", + tablet_id, transaction_id); + } TxnVal& txn_val = _txn_map[txn_key]; *(txn_val.publish_status) = publish_status; if (publish_status == PublishStatus::SUCCEED) { @@ -184,7 +189,9 @@ void CloudTxnDeleteBitmapCache::update_tablet_txn_info(TTransactionId transactio LOG_INFO("update txn related delete bitmap") .tag("txn_id", transaction_id) .tag("tablt_id", tablet_id) - .tag("delete_bitmap_size", charge); + .tag("delete_bitmap_size", charge) + .tag("publish_status", static_cast(publish_status)); + return Status::OK(); } void 
CloudTxnDeleteBitmapCache::remove_expired_tablet_txn_info() { @@ -238,7 +245,8 @@ void CloudTxnDeleteBitmapCache::remove_unused_tablet_txn_info(TTransactionId tra void CloudTxnDeleteBitmapCache::_clean_thread_callback() { do { remove_expired_tablet_txn_info(); - } while (!_stop_latch.wait_for(std::chrono::seconds(300))); + } while (!_stop_latch.wait_for( + std::chrono::seconds(config::remove_expired_tablet_txn_info_interval_seconds))); } } // namespace doris \ No newline at end of file diff --git a/be/src/cloud/cloud_txn_delete_bitmap_cache.h b/be/src/cloud/cloud_txn_delete_bitmap_cache.h index 75577ae2e3fee0..91a0531c60ae04 100644 --- a/be/src/cloud/cloud_txn_delete_bitmap_cache.h +++ b/be/src/cloud/cloud_txn_delete_bitmap_cache.h @@ -30,7 +30,7 @@ namespace doris { // Record transaction related delete bitmaps using a lru cache. -class CloudTxnDeleteBitmapCache : public LRUCachePolicyTrackingManual { +class CloudTxnDeleteBitmapCache : public LRUCachePolicy { public: CloudTxnDeleteBitmapCache(size_t size_in_bytes); @@ -50,10 +50,10 @@ class CloudTxnDeleteBitmapCache : public LRUCachePolicyTrackingManual { RowsetSharedPtr rowset, int64_t txn_expirationm, std::shared_ptr partial_update_info); - void update_tablet_txn_info(TTransactionId transaction_id, int64_t tablet_id, - DeleteBitmapPtr delete_bitmap, - const RowsetIdUnorderedSet& rowset_ids, - PublishStatus publish_status, TxnPublishInfo publish_info = {}); + Status update_tablet_txn_info(TTransactionId transaction_id, int64_t tablet_id, + DeleteBitmapPtr delete_bitmap, + const RowsetIdUnorderedSet& rowset_ids, + PublishStatus publish_status, TxnPublishInfo publish_info = {}); void remove_expired_tablet_txn_info(); diff --git a/be/src/cloud/config.cpp b/be/src/cloud/config.cpp index 0f59b51059b69e..e724dbea84e10c 100644 --- a/be/src/cloud/config.cpp +++ b/be/src/cloud/config.cpp @@ -22,7 +22,6 @@ namespace doris::config { DEFINE_String(deploy_mode, ""); -DEFINE_mString(cloud_instance_id, ""); DEFINE_mString(cloud_unique_id, ""); DEFINE_mString(meta_service_endpoint, ""); DEFINE_Bool(meta_service_use_load_balancer, "false"); @@ -66,12 +65,14 @@ DEFINE_mInt32(sync_load_for_tablets_thread, "32"); DEFINE_mBool(enable_new_tablet_do_compaction, "false"); +DEFINE_Int32(delete_bitmap_lock_expiration_seconds, "10"); + DEFINE_Bool(enable_cloud_txn_lazy_commit, "false"); -void set_cloud_unique_id(std::string instance_id) { - if (cloud_unique_id.empty() && !instance_id.empty()) { - static_cast(set_config("cloud_unique_id", "1:" + instance_id + ":compute", true)); - } -} +DEFINE_mInt32(remove_expired_tablet_txn_info_interval_seconds, "300"); + +DEFINE_mInt32(tablet_txn_info_min_expired_seconds, "120"); + +DEFINE_mBool(enable_use_cloud_unique_id_from_fe, "true"); } // namespace doris::config diff --git a/be/src/cloud/config.h b/be/src/cloud/config.h index 57f6348df7067b..86197f924d0cad 100644 --- a/be/src/cloud/config.h +++ b/be/src/cloud/config.h @@ -23,15 +23,12 @@ namespace doris::config { DECLARE_String(deploy_mode); // deprecated do not configure directly -DECLARE_mString(cloud_instance_id); DECLARE_mString(cloud_unique_id); static inline bool is_cloud_mode() { return deploy_mode == "cloud" || !cloud_unique_id.empty(); } -void set_cloud_unique_id(std::string instance_id); - // Set the endpoint of meta service. // // If meta services are deployed behind a load balancer, set this config to "host:port" of the load balancer. 
@@ -100,7 +97,15 @@ DECLARE_mBool(save_load_error_log_to_s3); // the theads which sync the datas which loaded in other clusters DECLARE_mInt32(sync_load_for_tablets_thread); +DECLARE_Int32(delete_bitmap_lock_expiration_seconds); + // enable large txn lazy commit in meta-service `commit_txn` DECLARE_mBool(enable_cloud_txn_lazy_commit); +DECLARE_mInt32(remove_expired_tablet_txn_info_interval_seconds); + +DECLARE_mInt32(tablet_txn_info_min_expired_seconds); + +DECLARE_mBool(enable_use_cloud_unique_id_from_fe); + } // namespace doris::config diff --git a/be/src/clucene b/be/src/clucene index c5d02a7e41194b..5e9566ab364d71 160000 --- a/be/src/clucene +++ b/be/src/clucene @@ -1 +1 @@ -Subproject commit c5d02a7e41194b02444be6d684e3aeb4ff1b5595 +Subproject commit 5e9566ab364d71b64c436ee46e5c848eed0ab7f7 diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp index 06144dd3142b25..58679fbe9b4245 100644 --- a/be/src/common/config.cpp +++ b/be/src/common/config.cpp @@ -150,6 +150,9 @@ DEFINE_mInt64(stacktrace_in_alloc_large_memory_bytes, "2147483648"); DEFINE_mInt64(crash_in_alloc_large_memory_bytes, "-1"); +// If memory tracker value is inaccurate, BE will crash. usually used in test environments, default value is false. +DEFINE_mBool(crash_in_memory_tracker_inaccurate, "false"); + // default is true. if any memory tracking in Orphan mem tracker will report error. // !! not modify the default value of this conf!! otherwise memory errors cannot be detected in time. // allocator free memory not need to check, because when the thread memory tracker label is Orphan, @@ -513,8 +516,12 @@ DEFINE_Int32(brpc_heavy_work_pool_max_queue_size, "-1"); DEFINE_Int32(brpc_light_work_pool_max_queue_size, "-1"); DEFINE_mBool(enable_bthread_transmit_block, "true"); +//Enable brpc builtin services, see: +//https://brpc.apache.org/docs/server/basics/#disable-built-in-services-completely +DEFINE_Bool(enable_brpc_builtin_services, "true"); + // The maximum amount of data that can be processed by a stream load -DEFINE_mInt64(streaming_load_max_mb, "10240"); +DEFINE_mInt64(streaming_load_max_mb, "102400"); // Some data formats, such as JSON, cannot be streamed. // Therefore, it is necessary to limit the maximum number of // such data when using stream load to prevent excessive memory consumption. @@ -591,14 +598,6 @@ DEFINE_mInt32(memory_maintenance_sleep_time_ms, "20"); // After minor gc, no minor gc during sleep, but full gc is possible. DEFINE_mInt32(memory_gc_sleep_time_ms, "500"); -// percent of (active memtables size / all memtables size) when reach hard limit -DEFINE_mInt32(memtable_hard_limit_active_percent, "50"); - -// percent of (active memtables size / all memtables size) when reach soft limit -DEFINE_mInt32(memtable_soft_limit_active_percent, "50"); - -// memtable insert memory tracker will multiply input block size with this ratio -DEFINE_mDouble(memtable_insert_memory_ratio, "1.4"); // max write buffer size before flush, default 200MB DEFINE_mInt64(write_buffer_size, "209715200"); // max buffer size used in memtable for the aggregated table, default 400MB @@ -1108,12 +1107,14 @@ DEFINE_mBool(enable_missing_rows_correctness_check, "false"); // When the number of missing versions is more than this value, do not directly // retry the publish and handle it through async publish. 
DEFINE_mInt32(mow_publish_max_discontinuous_version_num, "20"); +// When the version is not continuous for MOW table in publish phase and the gap between +// current txn's publishing version and the max version of the tablet exceeds this value, +// don't print warning log +DEFINE_mInt32(publish_version_gap_logging_threshold, "200"); // The secure path with user files, used in the `local` table function. DEFINE_mString(user_files_secure_path, "${DORIS_HOME}"); -DEFINE_Int32(partition_topn_partition_threshold, "1024"); - DEFINE_Int32(fe_expire_duration_seconds, "60"); DEFINE_Int32(grace_shutdown_wait_seconds, "120"); @@ -1667,8 +1668,6 @@ bool init(const char* conf_file, bool fill_conf_map, bool must_exist, bool set_t SET_FIELD(it.second, std::vector, fill_conf_map, set_to_default); } - set_cloud_unique_id(cloud_instance_id); - return true; } diff --git a/be/src/common/config.h b/be/src/common/config.h index cc26f52abbab23..56a9357e72e798 100644 --- a/be/src/common/config.h +++ b/be/src/common/config.h @@ -194,11 +194,15 @@ DECLARE_mBool(enable_stacktrace); // if alloc failed using Doris Allocator, will print stacktrace in error log. // if is -1, disable print stacktrace when alloc large memory. DECLARE_mInt64(stacktrace_in_alloc_large_memory_bytes); + // when alloc memory larger than crash_in_alloc_large_memory_bytes will crash, default -1 means disabled. // if you need a core dump to analyze large memory allocation, // modify this parameter to crash when large memory allocation occur will help DECLARE_mInt64(crash_in_alloc_large_memory_bytes); +// If memory tracker value is inaccurate, BE will crash. usually used in test environments, default value is false. +DECLARE_mBool(crash_in_memory_tracker_inaccurate); + // default is true. if any memory tracking in Orphan mem tracker will report error. // !! not modify the default value of this conf!! otherwise memory errors cannot be detected in time. // allocator free memory not need to check, because when the thread memory tracker label is Orphan, @@ -644,14 +648,6 @@ DECLARE_mInt32(memory_maintenance_sleep_time_ms); // After minor gc, no minor gc during sleep, but full gc is possible. DECLARE_mInt32(memory_gc_sleep_time_ms); -// percent of (active memtables size / all memtables size) when reach hard limit -DECLARE_mInt32(memtable_hard_limit_active_percent); - -// percent of (active memtables size / all memtables size) when reach soft limit -DECLARE_mInt32(memtable_soft_limit_active_percent); - -// memtable insert memory tracker will multiply input block size with this ratio -DECLARE_mDouble(memtable_insert_memory_ratio); // max write buffer size before flush, default 200MB DECLARE_mInt64(write_buffer_size); // max buffer size used in memtable for the aggregated table, default 400MB @@ -984,6 +980,8 @@ DECLARE_mInt64(nodechannel_pending_queue_max_bytes); // The batch size for sending data by brpc streaming client DECLARE_mInt64(brpc_streaming_client_batch_bytes); +DECLARE_Bool(enable_brpc_builtin_services); + // Max waiting time to wait the "plan fragment start" rpc. // If timeout, the fragment will be cancelled. // This parameter is usually only used when the FE loses connection, @@ -1179,14 +1177,14 @@ DECLARE_mBool(enable_missing_rows_correctness_check); // When the number of missing versions is more than this value, do not directly // retry the publish and handle it through async publish. 
DECLARE_mInt32(mow_publish_max_discontinuous_version_num); +// When the version is not continuous for MOW table in publish phase and the gap between +// current txn's publishing version and the max version of the tablet exceeds this value, +// don't print warning log +DECLARE_mInt32(publish_version_gap_logging_threshold); // The secure path with user files, used in the `local` table function. DECLARE_mString(user_files_secure_path); -// This threshold determines how many partitions will be allocated for window function get topn. -// and if this threshold is exceeded, the remaining data will be pass through to other node directly. -DECLARE_Int32(partition_topn_partition_threshold); - // If fe's frontend info has not been updated for more than fe_expire_duration_seconds, it will be regarded // as an abnormal fe, this will cause be to cancel this fe's related query. DECLARE_Int32(fe_expire_duration_seconds); diff --git a/be/src/common/daemon.cpp b/be/src/common/daemon.cpp index 713813b4a334f9..5da49758865c1c 100644 --- a/be/src/common/daemon.cpp +++ b/be/src/common/daemon.cpp @@ -58,6 +58,7 @@ #include "runtime/memory/memory_reclamation.h" #include "runtime/runtime_query_statistics_mgr.h" #include "runtime/workload_group/workload_group_manager.h" +#include "util/algorithm_util.h" #include "util/cpu_info.h" #include "util/debug_util.h" #include "util/disk_info.h" @@ -242,17 +243,16 @@ void refresh_memory_state_after_memory_change() { void refresh_cache_capacity() { if (refresh_cache_capacity_sleep_time_ms <= 0) { - auto cache_capacity_reduce_mem_limit = uint64_t( + auto cache_capacity_reduce_mem_limit = int64_t( doris::MemInfo::soft_mem_limit() * config::cache_capacity_reduce_mem_limit_frac); int64_t process_memory_usage = doris::GlobalMemoryArbitrator::process_memory_usage(); + // the rule is like this: + // 1. if the process mem usage < soft memlimit * 0.6, then do not need adjust cache capacity. + // 2. if the process mem usage > soft memlimit * 0.6 and process mem usage < soft memlimit, then it will be adjusted to a lower value. + // 3. if the process mem usage > soft memlimit, then the capacity is adjusted to 0. double new_cache_capacity_adjust_weighted = - process_memory_usage <= cache_capacity_reduce_mem_limit - ? 
1 - : std::min( - 1 - (process_memory_usage - cache_capacity_reduce_mem_limit) / - (doris::MemInfo::soft_mem_limit() - - cache_capacity_reduce_mem_limit), - 0); + AlgoUtil::descent_by_step(10, cache_capacity_reduce_mem_limit, + doris::MemInfo::soft_mem_limit(), process_memory_usage); if (new_cache_capacity_adjust_weighted != doris::GlobalMemoryArbitrator::last_cache_capacity_adjust_weighted) { doris::GlobalMemoryArbitrator::last_cache_capacity_adjust_weighted = diff --git a/be/src/exec/olap_common.h b/be/src/exec/olap_common.h index 9ba1b81d1b8f11..a4180938dfc536 100644 --- a/be/src/exec/olap_common.h +++ b/be/src/exec/olap_common.h @@ -375,7 +375,7 @@ class OlapScanKeys { template Status extend_scan_key(ColumnValueRange& range, int32_t max_scan_key_num, - bool* exact_value, bool* eos); + bool* exact_value, bool* eos, bool* should_break); Status get_key_range(std::vector>* key_range); @@ -993,7 +993,8 @@ bool ColumnValueRange::has_intersection(ColumnValueRange Status OlapScanKeys::extend_scan_key(ColumnValueRange& range, - int32_t max_scan_key_num, bool* exact_value, bool* eos) { + int32_t max_scan_key_num, bool* exact_value, bool* eos, + bool* should_break) { using CppType = typename PrimitiveTypeTraits::CppType; using ConstIterator = typename std::set::const_iterator; @@ -1017,6 +1018,7 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range, range.convert_to_range_value(); *exact_value = false; } else { + *should_break = true; return Status::OK(); } } diff --git a/be/src/exec/rowid_fetcher.cpp b/be/src/exec/rowid_fetcher.cpp index 9915f1b626a21b..6ca790aed99b63 100644 --- a/be/src/exec/rowid_fetcher.cpp +++ b/be/src/exec/rowid_fetcher.cpp @@ -241,6 +241,10 @@ Status RowIDFetcher::fetch(const vectorized::ColumnPtr& column_row_ids, std::vector rows_locs; rows_locs.reserve(rows_locs.size()); RETURN_IF_ERROR(_merge_rpc_results(mget_req, resps, cntls, res_block, &rows_locs)); + if (rows_locs.size() < column_row_ids->size()) { + return Status::InternalError("Miss matched return row loc count {}, expected {}, input {}", + rows_locs.size(), res_block->rows(), column_row_ids->size()); + } // Final sort by row_ids sequence, since row_ids is already sorted if need std::map positions; for (size_t i = 0; i < rows_locs.size(); ++i) { diff --git a/be/src/exec/tablet_info.cpp b/be/src/exec/tablet_info.cpp index 3d73bf1bd886de..44846ded868e8f 100644 --- a/be/src/exec/tablet_info.cpp +++ b/be/src/exec/tablet_info.cpp @@ -129,6 +129,9 @@ Status OlapTableSchemaParam::init(const POlapTableSchemaParam& pschema) { _auto_increment_column_unique_id = pschema.auto_increment_column_unique_id(); } _timestamp_ms = pschema.timestamp_ms(); + if (pschema.has_nano_seconds()) { + _nano_seconds = pschema.nano_seconds(); + } _timezone = pschema.timezone(); for (const auto& col : pschema.partial_update_input_columns()) { @@ -273,6 +276,7 @@ void OlapTableSchemaParam::to_protobuf(POlapTableSchemaParam* pschema) const { pschema->set_auto_increment_column_unique_id(_auto_increment_column_unique_id); pschema->set_timestamp_ms(_timestamp_ms); pschema->set_timezone(_timezone); + pschema->set_nano_seconds(_nano_seconds); for (auto col : _partial_update_input_columns) { *pschema->add_partial_update_input_columns() = col; } diff --git a/be/src/exec/tablet_info.h b/be/src/exec/tablet_info.h index fcba8fd82623bb..ff1c2e8e6b072e 100644 --- a/be/src/exec/tablet_info.h +++ b/be/src/exec/tablet_info.h @@ -96,6 +96,8 @@ class OlapTableSchemaParam { int32_t auto_increment_column_unique_id() const { return 
_auto_increment_column_unique_id; } void set_timestamp_ms(int64_t timestamp_ms) { _timestamp_ms = timestamp_ms; } int64_t timestamp_ms() const { return _timestamp_ms; } + void set_nano_seconds(int32_t nano_seconds) { _nano_seconds = nano_seconds; } + int32_t nano_seconds() const { return _nano_seconds; } void set_timezone(std::string timezone) { _timezone = timezone; } std::string timezone() const { return _timezone; } bool is_strict_mode() const { return _is_strict_mode; } @@ -116,6 +118,7 @@ class OlapTableSchemaParam { std::string _auto_increment_column; int32_t _auto_increment_column_unique_id; int64_t _timestamp_ms = 0; + int32_t _nano_seconds {0}; std::string _timezone; }; diff --git a/be/src/exprs/runtime_filter.cpp b/be/src/exprs/runtime_filter.cpp index 2bda40caf68657..96c99c606561af 100644 --- a/be/src/exprs/runtime_filter.cpp +++ b/be/src/exprs/runtime_filter.cpp @@ -1274,6 +1274,7 @@ void IRuntimeFilter::signal() { } void IRuntimeFilter::set_filter_timer(std::shared_ptr timer) { + std::unique_lock lock(_inner_mutex); _filter_timer.push_back(timer); } diff --git a/be/src/http/action/file_cache_action.cpp b/be/src/http/action/file_cache_action.cpp index 659be2537997f3..f31c040c5cf672 100644 --- a/be/src/http/action/file_cache_action.cpp +++ b/be/src/http/action/file_cache_action.cpp @@ -22,11 +22,14 @@ #include #include +#include "common/status.h" #include "http/http_channel.h" #include "http/http_headers.h" #include "http/http_request.h" #include "http/http_status.h" +#include "io/cache/block_file_cache.h" #include "io/cache/block_file_cache_factory.h" +#include "io/cache/file_cache_common.h" #include "olap/olap_define.h" #include "olap/tablet_meta.h" #include "util/easy_json.h" @@ -39,10 +42,12 @@ constexpr static std::string_view SYNC = "sync"; constexpr static std::string_view PATH = "path"; constexpr static std::string_view CLEAR = "clear"; constexpr static std::string_view RESET = "reset"; +constexpr static std::string_view HASH = "hash"; constexpr static std::string_view CAPACITY = "capacity"; constexpr static std::string_view RELEASE = "release"; constexpr static std::string_view BASE_PATH = "base_path"; constexpr static std::string_view RELEASED_ELEMENTS = "released_elements"; +constexpr static std::string_view VALUE = "value"; Status FileCacheAction::_handle_header(HttpRequest* req, std::string* json_metrics) { req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.data()); @@ -81,6 +86,16 @@ Status FileCacheAction::_handle_header(HttpRequest* req, std::string* json_metri auto ret = io::FileCacheFactory::instance()->reset_capacity(path, new_capacity); LOG(INFO) << ret; } + } else if (operation == HASH) { + const std::string& segment_path = req->param(VALUE.data()); + if (segment_path.empty()) { + st = Status::InvalidArgument("missing parameter: {} is required", VALUE.data()); + } else { + io::UInt128Wrapper ret = io::BlockFileCache::hash(segment_path); + EasyJson json; + json[HASH.data()] = ret.to_string(); + *json_metrics = json.ToString(); + } } else { st = Status::InternalError("invalid operation: {}", operation); } @@ -92,7 +107,8 @@ void FileCacheAction::handle(HttpRequest* req) { Status status = _handle_header(req, &json_metrics); std::string status_result = status.to_json(); if (status.ok()) { - HttpChannel::send_reply(req, HttpStatus::OK, json_metrics); + HttpChannel::send_reply(req, HttpStatus::OK, + json_metrics.empty() ? 
status.to_json() : json_metrics); } else { HttpChannel::send_reply(req, HttpStatus::INTERNAL_SERVER_ERROR, status_result); } diff --git a/be/src/http/action/http_stream.cpp b/be/src/http/action/http_stream.cpp index c6176c52815459..4a34605aa336a1 100644 --- a/be/src/http/action/http_stream.cpp +++ b/be/src/http/action/http_stream.cpp @@ -197,7 +197,12 @@ Status HttpStreamAction::_on_header(HttpRequest* http_req, std::shared_ptrbody_bytes = 0; size_t csv_max_body_bytes = config::streaming_load_max_mb * 1024 * 1024; if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) { - ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + try { + ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + } catch (const std::exception& e) { + return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}", + http_req->header(HttpHeaders::CONTENT_LENGTH), e.what()); + } // csv max body size if (ctx->body_bytes > csv_max_body_bytes) { LOG(WARNING) << "body exceed max size." << ctx->brief(); @@ -352,7 +357,13 @@ Status HttpStreamAction::process_put(HttpRequest* http_req, // FIXME find a way to avoid chunked stream load write large WALs size_t content_length = 0; if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) { - content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + try { + content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + } catch (const std::exception& e) { + return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}", + http_req->header(HttpHeaders::CONTENT_LENGTH), + e.what()); + } if (ctx->format == TFileFormatType::FORMAT_CSV_GZ || ctx->format == TFileFormatType::FORMAT_CSV_LZO || ctx->format == TFileFormatType::FORMAT_CSV_BZ2 || diff --git a/be/src/http/action/load_channel_action.cpp b/be/src/http/action/load_channel_action.cpp new file mode 100644 index 00000000000000..35efe56ecdea2d --- /dev/null +++ b/be/src/http/action/load_channel_action.cpp @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "http/action/load_channel_action.h" + +#include +#include +#include +#include +#include +#include + +#include "cloud/config.h" +#include "http/http_channel.h" +#include "http/http_headers.h" +#include "http/http_request.h" +#include "http/http_status.h" +#include "olap/olap_common.h" +#include "olap/storage_engine.h" +#include "olap/tablet_manager.h" +#include "runtime/exec_env.h" +#include "runtime/load_channel_mgr.h" +#include "service/backend_options.h" + +namespace doris { + +const static std::string HEADER_JSON = "application/json"; + +void LoadChannelAction::handle(HttpRequest* req) { + req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.c_str()); + HttpChannel::send_reply(req, HttpStatus::OK, _get_load_channels().ToString()); +} + +EasyJson LoadChannelAction::_get_load_channels() { + EasyJson response; + + auto load_channels = ExecEnv::GetInstance()->load_channel_mgr()->get_all_load_channel_ids(); + + response["msg"] = "OK"; + response["code"] = 0; + EasyJson data = response.Set("data", EasyJson::kObject); + data["host"] = BackendOptions::get_localhost(); + EasyJson tablets = data.Set("load_channels", EasyJson::kArray); + for (auto& load_id : load_channels) { + EasyJson tablet = tablets.PushBack(EasyJson::kObject); + tablet["load_id"] = load_id; + } + response["count"] = load_channels.size(); + return response; +} + +} // namespace doris diff --git a/be/src/http/action/load_channel_action.h b/be/src/http/action/load_channel_action.h new file mode 100644 index 00000000000000..2a9ec3dbf492b5 --- /dev/null +++ b/be/src/http/action/load_channel_action.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "http/http_handler.h" +#include "http/http_handler_with_auth.h" +#include "util/easy_json.h" + +namespace doris { +class HttpRequest; + +class ExecEnv; + +// Get BE load stream info from http API. 
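// Illustrative response shape, inferred from _get_load_channels() in the .cpp
// above (a sketch, not authoritative); the host and load_id values are
// placeholders, and the URL path this handler is registered under is not part
// of this diff:
//
//   {
//     "msg": "OK",
//     "code": 0,
//     "data": {
//       "host": "<be_host>",
//       "load_channels": [ { "load_id": "<load_id>" } ]
//     },
//     "count": 1
//   }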
+class LoadChannelAction final : public HttpHandlerWithAuth { +public: + LoadChannelAction(ExecEnv* exec_env) : HttpHandlerWithAuth(exec_env) {} + + ~LoadChannelAction() override = default; + + void handle(HttpRequest* req) override; + +private: + static EasyJson _get_load_channels(); +}; +} // namespace doris diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp index 1a9420dea637db..eef6a27b626539 100644 --- a/be/src/http/action/stream_load.cpp +++ b/be/src/http/action/stream_load.cpp @@ -266,7 +266,12 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptrheader(HttpHeaders::CONTENT_LENGTH).empty()) { - ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + try { + ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + } catch (const std::exception& e) { + return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}", + http_req->header(HttpHeaders::CONTENT_LENGTH), e.what()); + } // json max body size if ((ctx->format == TFileFormatType::FORMAT_JSON) && (ctx->body_bytes > json_max_body_bytes) && !read_json_by_line) { @@ -671,7 +676,13 @@ Status StreamLoadAction::_process_put(HttpRequest* http_req, // FIXME find a way to avoid chunked stream load write large WALs size_t content_length = 0; if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) { - content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + try { + content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); + } catch (const std::exception& e) { + return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}", + http_req->header(HttpHeaders::CONTENT_LENGTH), + e.what()); + } if (ctx->format == TFileFormatType::FORMAT_CSV_GZ || ctx->format == TFileFormatType::FORMAT_CSV_LZO || ctx->format == TFileFormatType::FORMAT_CSV_BZ2 || diff --git a/be/src/http/default_path_handlers.cpp b/be/src/http/default_path_handlers.cpp index ded72d9f28f7df..2ece1e3fdcd20a 100644 --- a/be/src/http/default_path_handlers.cpp +++ b/be/src/http/default_path_handlers.cpp @@ -142,7 +142,7 @@ void display_tablets_callback(const WebPageHandler::ArgumentMap& args, EasyJson* // Registered to handle "/mem_tracker", and prints out memory tracker information. void mem_tracker_handler(const WebPageHandler::ArgumentMap& args, std::stringstream* output) { (*output) << "
Memory usage by subsystem
\n"; - std::vector snapshots; + std::vector snapshots; auto iter = args.find("type"); if (iter != args.end()) { if (iter->second == "global") { @@ -159,7 +159,7 @@ void mem_tracker_handler(const WebPageHandler::ArgumentMap& args, std::stringstr } else if (iter->second == "other") { MemTrackerLimiter::make_type_snapshots(&snapshots, MemTrackerLimiter::Type::OTHER); } else if (iter->second == "reserved_memory") { - GlobalMemoryArbitrator::make_reserved_memory_snapshots(&snapshots); + MemTrackerLimiter::make_all_reserved_trackers_snapshots(&snapshots); } else if (iter->second == "all") { MemTrackerLimiter::make_all_memory_state_snapshots(&snapshots); } @@ -191,7 +191,6 @@ void mem_tracker_handler(const WebPageHandler::ArgumentMap& args, std::stringstr (*output) << "" "Type" "Label" - "Parent Label" "Limit" "Current Consumption(Bytes)" @@ -207,8 +206,8 @@ void mem_tracker_handler(const WebPageHandler::ArgumentMap& args, std::stringstr string peak_consumption_normalize = AccurateItoaKMGT(item.peak_consumption); (*output) << strings::Substitute( "$0$1$2$3$4$5$6$7\n", - item.type, item.label, item.parent_label, limit_str, item.cur_consumption, + "td>\n", + item.type, item.label, limit_str, item.cur_consumption, current_consumption_normalize, item.peak_consumption, peak_consumption_normalize); } (*output) << "\n"; diff --git a/be/src/olap/compaction.cpp b/be/src/olap/compaction.cpp index 963b885a26b4ad..646613cbdadab1 100644 --- a/be/src/olap/compaction.cpp +++ b/be/src/olap/compaction.cpp @@ -627,7 +627,8 @@ Status Compaction::do_inverted_index_compaction() { auto seg_path = DORIS_TRY(rowset->segment_path(seg_id)); auto inverted_index_file_reader = std::make_unique( fs, std::string {InvertedIndexDescriptor::get_index_file_path_prefix(seg_path)}, - _cur_tablet_schema->get_inverted_index_storage_format()); + _cur_tablet_schema->get_inverted_index_storage_format(), + rowset->rowset_meta()->inverted_index_file_info(seg_id)); bool open_idx_file_cache = false; RETURN_NOT_OK_STATUS_WITH_WARN( inverted_index_file_reader->init(config::inverted_index_read_buffer_size, @@ -840,56 +841,47 @@ void Compaction::construct_skip_inverted_index(RowsetWriterContext& ctx) { return false; } - auto inverted_index_file_reader = std::make_unique( - fs, - std::string { - InvertedIndexDescriptor::get_index_file_path_prefix(*seg_path)}, - _cur_tablet_schema->get_inverted_index_storage_format()); - bool open_idx_file_cache = false; - auto st = inverted_index_file_reader->init(config::inverted_index_read_buffer_size, - open_idx_file_cache); - if (!st.ok()) { - LOG(WARNING) << "init index " - << inverted_index_file_reader->get_index_file_path(index_meta) - << " error:" << st; - return false; - } - - bool exists = false; - if (!inverted_index_file_reader->index_file_exist(index_meta, &exists).ok()) { - LOG(ERROR) << inverted_index_file_reader->get_index_file_path(index_meta) - << " fs->exists error"; - return false; - } - - if (!exists) { - LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id[" - << col_unique_id << "]," - << inverted_index_file_reader->get_index_file_path(index_meta) - << " is not exists, will skip index compaction"; - return false; - } + std::string index_file_path; + try { + auto inverted_index_file_reader = std::make_unique( + fs, + std::string { + InvertedIndexDescriptor::get_index_file_path_prefix(*seg_path)}, + _cur_tablet_schema->get_inverted_index_storage_format(), + rowset->rowset_meta()->inverted_index_file_info(i)); + bool open_idx_file_cache = false; + auto st = 
inverted_index_file_reader->init( + config::inverted_index_read_buffer_size, open_idx_file_cache); + index_file_path = inverted_index_file_reader->get_index_file_path(index_meta); + if (!st.ok()) { + LOG(WARNING) << "init index " << index_file_path << " error:" << st; + return false; + } - // check index meta - auto result = inverted_index_file_reader->open(index_meta); - if (!result.has_value()) { - LOG(WARNING) << "open index " - << inverted_index_file_reader->get_index_file_path(index_meta) - << " error:" << result.error(); - return false; - } - auto reader = std::move(result.value()); - std::vector files; - reader->list(&files); - reader->close(); - - // why is 3? - // bkd index will write at least 3 files - if (files.size() < 3) { + // check index meta + auto result = inverted_index_file_reader->open(index_meta); + if (!result.has_value()) { + LOG(WARNING) + << "open index " << index_file_path << " error:" << result.error(); + return false; + } + auto reader = std::move(result.value()); + std::vector files; + reader->list(&files); + reader->close(); + + // why is 3? + // bkd index will write at least 3 files + if (files.size() < 3) { + LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id[" + << col_unique_id << "]," << index_file_path + << " is corrupted, will skip index compaction"; + return false; + } + } catch (CLuceneError& err) { LOG(WARNING) << "tablet[" << _tablet->tablet_id() << "] column_unique_id[" - << col_unique_id << "]," - << inverted_index_file_reader->get_index_file_path(index_meta) - << " is corrupted, will skip index compaction"; + << col_unique_id << "] open index[" << index_file_path + << "], will skip index compaction, error:" << err.what(); return false; } } diff --git a/be/src/olap/delta_writer_v2.cpp b/be/src/olap/delta_writer_v2.cpp index 2bd1fa6a111913..73d2fb1d9746a9 100644 --- a/be/src/olap/delta_writer_v2.cpp +++ b/be/src/olap/delta_writer_v2.cpp @@ -240,7 +240,8 @@ void DeltaWriterV2::_build_current_tablet_schema(int64_t index_id, _partial_update_info->init(*_tablet_schema, table_schema_param->is_partial_update(), table_schema_param->partial_update_input_columns(), table_schema_param->is_strict_mode(), - table_schema_param->timestamp_ms(), table_schema_param->timezone(), + table_schema_param->timestamp_ms(), + table_schema_param->nano_seconds(), table_schema_param->timezone(), table_schema_param->auto_increment_coulumn()); } diff --git a/be/src/olap/inverted_index_parser.h b/be/src/olap/inverted_index_parser.h index 87ea726723721d..0b8426d74c7ab3 100644 --- a/be/src/olap/inverted_index_parser.h +++ b/be/src/olap/inverted_index_parser.h @@ -46,6 +46,8 @@ struct InvertedIndexCtx { InvertedIndexParserType parser_type; std::string parser_mode; CharFilterMap char_filter_map; + std::string lower_case; + std::string stop_words; lucene::analysis::Analyzer* analyzer = nullptr; }; diff --git a/be/src/olap/memtable.cpp b/be/src/olap/memtable.cpp index 4f66a361650875..3cb2594b845b13 100644 --- a/be/src/olap/memtable.cpp +++ b/be/src/olap/memtable.cpp @@ -50,20 +50,16 @@ using namespace ErrorCode; MemTable::MemTable(int64_t tablet_id, std::shared_ptr tablet_schema, const std::vector* slot_descs, TupleDescriptor* tuple_desc, - bool enable_unique_key_mow, PartialUpdateInfo* partial_update_info, - const std::shared_ptr& insert_mem_tracker, - const std::shared_ptr& flush_mem_tracker) - : _tablet_id(tablet_id), + bool enable_unique_key_mow, PartialUpdateInfo* partial_update_info) + : _mem_type(MemType::ACTIVE), + _tablet_id(tablet_id), 
_enable_unique_key_mow(enable_unique_key_mow), _keys_type(tablet_schema->keys_type()), _tablet_schema(tablet_schema), - _insert_mem_tracker(insert_mem_tracker), - _flush_mem_tracker(flush_mem_tracker), _is_first_insertion(true), _agg_functions(tablet_schema->num_columns()), _offsets_of_aggregate_states(tablet_schema->num_columns()), - _total_size_of_aggregate_states(0), - _mem_usage(0) { + _total_size_of_aggregate_states(0) { g_memtable_cnt << 1; _query_thread_context.init_unlocked(); _arena = std::make_unique(); @@ -82,6 +78,7 @@ MemTable::MemTable(int64_t tablet_id, std::shared_ptr tablet_schem } // TODO: Support ZOrderComparator in the future _init_columns_offset_by_slot_descs(slot_descs, tuple_desc); + _mem_tracker = std::make_shared(); } void MemTable::_init_columns_offset_by_slot_descs(const std::vector* slot_descs, @@ -142,6 +139,13 @@ void MemTable::_init_agg_functions(const vectorized::Block* block) { MemTable::~MemTable() { SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_thread_context.query_mem_tracker); + if (_is_flush_success) { + // If the memtable is flush success, then its memtracker's consumption should be 0 + if (_mem_tracker->consumption() != 0 && config::crash_in_memory_tracker_inaccurate) { + LOG(FATAL) << "memtable flush success but cosumption is not 0, it is " + << _mem_tracker->consumption(); + } + } g_memtable_input_block_allocated_size << -_input_mutable_block.allocated_bytes(); g_memtable_cnt << -1; if (_keys_type != KeysType::DUP_KEYS) { @@ -159,14 +163,7 @@ MemTable::~MemTable() { } } std::for_each(_row_in_blocks.begin(), _row_in_blocks.end(), std::default_delete()); - _insert_mem_tracker->release(_mem_usage); - _flush_mem_tracker->set_consumption(0); - DCHECK_EQ(_insert_mem_tracker->consumption(), 0) - << std::endl - << MemTracker::log_usage(_insert_mem_tracker->make_snapshot()); - DCHECK_EQ(_flush_mem_tracker->consumption(), 0); _arena.reset(); - _agg_buffer_pool.clear(); _vec_row_comparator.reset(); _row_in_blocks.clear(); _agg_functions.clear(); @@ -181,6 +178,7 @@ int RowInBlockComparator::operator()(const RowInBlock* left, const RowInBlock* r Status MemTable::insert(const vectorized::Block* input_block, const std::vector& row_idxs) { + SCOPED_CONSUME_MEM_TRACKER(_mem_tracker); if (_is_first_insertion) { _is_first_insertion = false; auto clone_block = input_block->clone_without_columns(&_column_offset); @@ -215,10 +213,6 @@ Status MemTable::insert(const vectorized::Block* input_block, row_idxs.data() + num_rows, &_column_offset)); auto block_size1 = _input_mutable_block.allocated_bytes(); g_memtable_input_block_allocated_size << block_size1 - block_size0; - auto input_size = size_t(input_block->bytes() * num_rows / input_block->rows() * - config::memtable_insert_memory_ratio); - _mem_usage += input_size; - _insert_mem_tracker->consume(input_size); for (int i = 0; i < num_rows; i++) { _row_in_blocks.emplace_back(new RowInBlock {cursor_in_mutableblock + i}); } @@ -323,9 +317,14 @@ Status MemTable::_sort_by_cluster_keys() { } Tie tie = Tie(0, mutable_block.rows()); - for (auto i : _tablet_schema->cluster_key_idxes()) { + for (auto cid : _tablet_schema->cluster_key_idxes()) { + auto index = _tablet_schema->field_index(cid); + if (index == -1) { + return Status::InternalError("could not find cluster key column with unique_id=" + + std::to_string(cid) + " in tablet schema"); + } auto cmp = [&](const RowInBlock* lhs, const RowInBlock* rhs) -> int { - return mutable_block.compare_one_column(lhs->_row_pos, rhs->_row_pos, i, -1); + return 
mutable_block.compare_one_column(lhs->_row_pos, rhs->_row_pos, index, -1); }; _sort_one_column(row_in_blocks, tie, cmp); } @@ -463,10 +462,6 @@ void MemTable::_aggregate() { } if constexpr (!is_final) { // if is not final, we collect the agg results to input_block and then continue to insert - size_t shrunked_after_agg = _output_mutable_block.allocated_bytes(); - // flush will not run here, so will not duplicate `_flush_mem_tracker` - _insert_mem_tracker->consume(shrunked_after_agg - _mem_usage); - _mem_usage = shrunked_after_agg; _input_mutable_block.swap(_output_mutable_block); //TODO(weixang):opt here. std::unique_ptr empty_input_block = in_block.create_same_struct_block(0); @@ -479,6 +474,7 @@ void MemTable::_aggregate() { } void MemTable::shrink_memtable_by_agg() { + SCOPED_CONSUME_MEM_TRACKER(_mem_tracker); if (_keys_type == KeysType::DUP_KEYS) { return; } @@ -524,8 +520,8 @@ Status MemTable::_to_block(std::unique_ptr* res) { } g_memtable_input_block_allocated_size << -_input_mutable_block.allocated_bytes(); _input_mutable_block.clear(); - _insert_mem_tracker->release(_mem_usage); - _mem_usage = 0; + // After to block, all data in arena is saved in the block + _arena.reset(); *res = vectorized::Block::create_unique(_output_mutable_block.to_block()); return Status::OK(); } diff --git a/be/src/olap/memtable.h b/be/src/olap/memtable.h index 70f7a9f22a0aa8..4ae92c2d2d8949 100644 --- a/be/src/olap/memtable.h +++ b/be/src/olap/memtable.h @@ -47,6 +47,11 @@ class TabletSchema; class TupleDescriptor; enum KeysType : int; +// Active: the memtable is currently used by writer to insert into blocks +// Write_finished: the memtable finished write blocks and in the queue waiting for flush +// FLUSH: the memtable is under flushing, write segment to disk. +enum MemType { ACTIVE = 0, WRITE_FINISHED = 1, FLUSH = 2 }; + // row pos in _input_mutable_block struct RowInBlock { size_t _row_pos; @@ -171,16 +176,11 @@ class MemTable { public: MemTable(int64_t tablet_id, std::shared_ptr tablet_schema, const std::vector* slot_descs, TupleDescriptor* tuple_desc, - bool enable_unique_key_mow, PartialUpdateInfo* partial_update_info, - const std::shared_ptr& insert_mem_tracker, - const std::shared_ptr& flush_mem_tracker); + bool enable_unique_key_mow, PartialUpdateInfo* partial_update_info); ~MemTable(); int64_t tablet_id() const { return _tablet_id; } - size_t memory_usage() const { - return _insert_mem_tracker->consumption() + _arena->used_size() + - _flush_mem_tracker->consumption(); - } + size_t memory_usage() const { return _mem_tracker->consumption(); } // insert tuple from (row_pos) to (row_pos+num_rows) Status insert(const vectorized::Block* block, const std::vector& row_idxs); @@ -196,10 +196,16 @@ class MemTable { const MemTableStat& stat() { return _stat; } - std::shared_ptr flush_mem_tracker() { return _flush_mem_tracker; } - QueryThreadContext query_thread_context() { return _query_thread_context; } + std::shared_ptr mem_tracker() { return _mem_tracker; } + + void set_flush_success() { _is_flush_success = true; } + + MemType get_mem_type() { return _mem_type; } + + void update_mem_type(MemType memtype) { _mem_type = memtype; } + private: // for vectorized void _aggregate_two_row_in_block(vectorized::MutableBlock& mutable_block, RowInBlock* new_row, @@ -209,9 +215,11 @@ class MemTable { Status _to_block(std::unique_ptr* res); private: + std::atomic _mem_type; int64_t _tablet_id; bool _enable_unique_key_mow = false; bool _is_partial_update = false; + bool _is_flush_success = false; const KeysType 
_keys_type; std::shared_ptr _tablet_schema; @@ -219,18 +227,11 @@ class MemTable { QueryThreadContext _query_thread_context; - // `_insert_manual_mem_tracker` manually records the memory value of memtable insert() - // `_flush_hook_mem_tracker` automatically records the memory value of memtable flush() through mem hook. - // Is used to flush when _insert_manual_mem_tracker larger than write_buffer_size and run flush memtable - // when the sum of all memtable (_insert_manual_mem_tracker + _flush_hook_mem_tracker) exceeds the limit. - std::shared_ptr _insert_mem_tracker; - std::shared_ptr _flush_mem_tracker; + std::shared_ptr _mem_tracker; // Only the rows will be inserted into block can allocate memory from _arena. // In this way, we can make MemTable::memory_usage() to be more accurate, and eventually // reduce the number of segment files that are generated by current load std::unique_ptr _arena; - // The object buffer pool for convert tuple to row - ObjectPool _agg_buffer_pool; void _init_columns_offset_by_slot_descs(const std::vector* slot_descs, const TupleDescriptor* tuple_desc); @@ -264,8 +265,6 @@ class MemTable { std::vector _offsets_of_aggregate_states; size_t _total_size_of_aggregate_states; std::vector _row_in_blocks; - // Memory usage without _arena. - size_t _mem_usage; size_t _num_columns; int32_t _seq_col_idx_in_block = -1; diff --git a/be/src/olap/memtable_flush_executor.cpp b/be/src/olap/memtable_flush_executor.cpp index 887340eed700fe..dc911647be8f96 100644 --- a/be/src/olap/memtable_flush_executor.cpp +++ b/be/src/olap/memtable_flush_executor.cpp @@ -46,10 +46,10 @@ class MemtableFlushTask final : public Runnable { ENABLE_FACTORY_CREATOR(MemtableFlushTask); public: - MemtableFlushTask(std::shared_ptr flush_token, std::unique_ptr memtable, + MemtableFlushTask(std::shared_ptr flush_token, std::shared_ptr memtable, int32_t segment_id, int64_t submit_task_time) : _flush_token(flush_token), - _memtable(std::move(memtable)), + _memtable(memtable), _segment_id(segment_id), _submit_task_time(submit_task_time) { g_flush_task_num << 1; @@ -60,7 +60,7 @@ class MemtableFlushTask final : public Runnable { void run() override { auto token = _flush_token.lock(); if (token) { - token->_flush_memtable(std::move(_memtable), _segment_id, _submit_task_time); + token->_flush_memtable(_memtable, _segment_id, _submit_task_time); } else { LOG(WARNING) << "flush token is deconstructed, ignore the flush task"; } @@ -68,7 +68,7 @@ class MemtableFlushTask final : public Runnable { private: std::weak_ptr _flush_token; - std::unique_ptr _memtable; + std::shared_ptr _memtable; int32_t _segment_id; int64_t _submit_task_time; }; @@ -83,7 +83,7 @@ std::ostream& operator<<(std::ostream& os, const FlushStatistic& stat) { return os; } -Status FlushToken::submit(std::unique_ptr mem_table) { +Status FlushToken::submit(std::shared_ptr mem_table) { { std::shared_lock rdlk(_flush_status_lock); DBUG_EXECUTE_IF("FlushToken.submit_flush_error", { @@ -98,9 +98,8 @@ Status FlushToken::submit(std::unique_ptr mem_table) { return Status::OK(); } int64_t submit_task_time = MonotonicNanos(); - auto task = MemtableFlushTask::create_shared(shared_from_this(), std::move(mem_table), - _rowset_writer->allocate_segment_id(), - submit_task_time); + auto task = MemtableFlushTask::create_shared( + shared_from_this(), mem_table, _rowset_writer->allocate_segment_id(), submit_task_time); Status ret = _thread_pool->submit(std::move(task)); if (ret.ok()) { // _wait_running_task_finish was executed after this function, so no need to 
notify _cond here @@ -136,20 +135,19 @@ Status FlushToken::_do_flush_memtable(MemTable* memtable, int32_t segment_id, in VLOG_CRITICAL << "begin to flush memtable for tablet: " << memtable->tablet_id() << ", memsize: " << memtable->memory_usage() << ", rows: " << memtable->stat().raw_rows; + memtable->update_mem_type(MemType::FLUSH); int64_t duration_ns; SCOPED_RAW_TIMER(&duration_ns); SCOPED_ATTACH_TASK(memtable->query_thread_context()); signal::set_signal_task_id(_rowset_writer->load_id()); signal::tablet_id = memtable->tablet_id(); { + SCOPED_CONSUME_MEM_TRACKER(memtable->mem_tracker()); std::unique_ptr block; - // During to block method, it will release old memory and create new block, so that - // we could not scoped it. RETURN_IF_ERROR(memtable->to_block(&block)); - memtable->flush_mem_tracker()->consume(block->allocated_bytes()); - SCOPED_CONSUME_MEM_TRACKER(memtable->flush_mem_tracker()); RETURN_IF_ERROR(_rowset_writer->flush_memtable(block.get(), segment_id, flush_size)); } + memtable->set_flush_success(); _memtable_stat += memtable->stat(); DorisMetrics::instance()->memtable_flush_total->increment(1); DorisMetrics::instance()->memtable_flush_duration_us->increment(duration_ns / 1000); @@ -158,7 +156,7 @@ Status FlushToken::_do_flush_memtable(MemTable* memtable, int32_t segment_id, in return Status::OK(); } -void FlushToken::_flush_memtable(std::unique_ptr memtable_ptr, int32_t segment_id, +void FlushToken::_flush_memtable(std::shared_ptr memtable_ptr, int32_t segment_id, int64_t submit_task_time) { Defer defer {[&]() { std::lock_guard lock(_mutex); diff --git a/be/src/olap/memtable_flush_executor.h b/be/src/olap/memtable_flush_executor.h index 2d20298f800a37..25c5a37afba3ac 100644 --- a/be/src/olap/memtable_flush_executor.h +++ b/be/src/olap/memtable_flush_executor.h @@ -61,7 +61,7 @@ class FlushToken : public std::enable_shared_from_this { public: FlushToken(ThreadPool* thread_pool) : _flush_status(Status::OK()), _thread_pool(thread_pool) {} - Status submit(std::unique_ptr mem_table); + Status submit(std::shared_ptr mem_table); // error has happens, so we cancel this token // And remove all tasks in the queue. 
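// A plausible memtable lifecycle under the shared ownership introduced in this
// patch, inferred from the MemType enum and the hunks above (a sketch, not
// authoritative documentation):
//   1. MemTableWriter creates the memtable                        -> MemType::ACTIVE
//   2. _flush_memtable_async() keeps a weak_ptr in _freezed_mem_tables and
//      hands a shared_ptr to FlushToken::submit()                 -> MemType::WRITE_FINISHED
//   3. the flush thread runs _do_flush_memtable(), converts the memtable to a
//      block and writes the segment                               -> MemType::FLUSH
//   4. on success set_flush_success() is called, after which ~MemTable()
//      expects the memtable's own MemTracker consumption to be back at 0.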
@@ -87,7 +87,7 @@ class FlushToken : public std::enable_shared_from_this { private: friend class MemtableFlushTask; - void _flush_memtable(std::unique_ptr memtable_ptr, int32_t segment_id, + void _flush_memtable(std::shared_ptr memtable_ptr, int32_t segment_id, int64_t submit_task_time); Status _do_flush_memtable(MemTable* memtable, int32_t segment_id, int64_t* flush_size); diff --git a/be/src/olap/memtable_memory_limiter.cpp b/be/src/olap/memtable_memory_limiter.cpp index 23b760284b8985..213d5aaab9a7bc 100644 --- a/be/src/olap/memtable_memory_limiter.cpp +++ b/be/src/olap/memtable_memory_limiter.cpp @@ -20,6 +20,7 @@ #include #include "common/config.h" +#include "olap/memtable.h" #include "olap/memtable_writer.h" #include "util/doris_metrics.h" #include "util/mem_info.h" @@ -62,10 +63,7 @@ Status MemTableMemoryLimiter::init(int64_t process_mem_limit) { _load_hard_mem_limit * config::load_process_safe_mem_permit_percent / 100; g_load_hard_mem_limit.set_value(_load_hard_mem_limit); g_load_soft_mem_limit.set_value(_load_soft_mem_limit); - _memtable_tracker_set = - MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::LOAD, "MemTableTrackerSet"); - _mem_tracker = std::make_unique("AllMemTableMemory", - ExecEnv::GetInstance()->details_mem_tracker_set()); + _mem_tracker = std::make_unique("AllMemTableMemory"); REGISTER_HOOK_METRIC(memtable_memory_limiter_mem_consumption, [this]() { return _mem_tracker->consumption(); }); _log_timer.start(); @@ -77,33 +75,41 @@ void MemTableMemoryLimiter::register_writer(std::weak_ptr writer _writers.push_back(writer); } -bool MemTableMemoryLimiter::_sys_avail_mem_less_than_warning_water_mark() { +int64_t MemTableMemoryLimiter::_sys_avail_mem_less_than_warning_water_mark() { // reserve a small amount of memory so we do not trigger MinorGC - return doris::GlobalMemoryArbitrator::sys_mem_available() < - doris::MemInfo::sys_mem_available_warning_water_mark() + - config::memtable_limiter_reserved_memory_bytes; + return doris::MemInfo::sys_mem_available_warning_water_mark() - + doris::GlobalMemoryArbitrator::sys_mem_available() + + config::memtable_limiter_reserved_memory_bytes; } -bool MemTableMemoryLimiter::_process_used_mem_more_than_soft_mem_limit() { +int64_t MemTableMemoryLimiter::_process_used_mem_more_than_soft_mem_limit() { // reserve a small amount of memory so we do not trigger MinorGC - return GlobalMemoryArbitrator::process_memory_usage() > - MemInfo::soft_mem_limit() - config::memtable_limiter_reserved_memory_bytes; + return GlobalMemoryArbitrator::process_memory_usage() - MemInfo::soft_mem_limit() + + config::memtable_limiter_reserved_memory_bytes; } bool MemTableMemoryLimiter::_soft_limit_reached() { - return _mem_tracker->consumption() >= _load_soft_mem_limit || _hard_limit_reached(); + return _mem_tracker->consumption() > _load_soft_mem_limit || _hard_limit_reached(); } bool MemTableMemoryLimiter::_hard_limit_reached() { - return _mem_tracker->consumption() >= _load_hard_mem_limit || - _sys_avail_mem_less_than_warning_water_mark() || - _process_used_mem_more_than_soft_mem_limit(); + return _mem_tracker->consumption() > _load_hard_mem_limit || + _sys_avail_mem_less_than_warning_water_mark() > 0 || + _process_used_mem_more_than_soft_mem_limit() > 0; } bool MemTableMemoryLimiter::_load_usage_low() { return _mem_tracker->consumption() <= _load_safe_mem_permit; } +int64_t MemTableMemoryLimiter::_need_flush() { + int64_t limit1 = _mem_tracker->consumption() - _load_soft_mem_limit; + int64_t limit2 = _sys_avail_mem_less_than_warning_water_mark(); + 
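// Reading of _need_flush(), inferred from the three helpers it calls (each
// returns an overshoot in bytes, positive once its threshold is crossed, with
// the small reserved margin folded in): the function takes the worst overshoot
// and subtracts the memory of memtables already queued for flush.
// For example, with 9 GB of load memory against an 8 GB soft limit, limit1 is
// 1 GB; if 2 GB of memtables are already in the flush queue and limit2/limit3
// are smaller, need_flush is negative and no extra flush is triggered.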
int64_t limit3 = _process_used_mem_more_than_soft_mem_limit(); + int64_t need_flush = std::max(limit1, std::max(limit2, limit3)); + return need_flush - _queue_mem_usage; +} + void MemTableMemoryLimiter::handle_memtable_flush() { // Check the soft limit. DCHECK(_load_soft_mem_limit > 0); @@ -114,34 +120,29 @@ void MemTableMemoryLimiter::handle_memtable_flush() { timer.start(); std::unique_lock l(_lock); g_memtable_memory_limit_waiting_threads << 1; - while (_hard_limit_reached()) { - LOG(INFO) << "reached memtable memory hard limit" - << " (active: " << PrettyPrinter::print_bytes(_active_mem_usage) - << ", write: " << PrettyPrinter::print_bytes(_write_mem_usage) - << ", flush: " << PrettyPrinter::print_bytes(_flush_mem_usage) << ")"; - if (_active_mem_usage >= - _write_mem_usage * config::memtable_hard_limit_active_percent / 100) { - _flush_active_memtables(_write_mem_usage / 20); - } - if (!_hard_limit_reached()) { - break; + bool first = true; + do { + if (!first) { + auto st = _hard_limit_end_cond.wait_for(l, std::chrono::milliseconds(1000)); + if (st == std::cv_status::timeout) { + LOG(INFO) << "timeout when waiting for memory hard limit end, try again"; + } } - auto st = _hard_limit_end_cond.wait_for(l, std::chrono::milliseconds(1000)); - if (st == std::cv_status::timeout) { - LOG(INFO) << "timeout when waiting for memory hard limit end, try again"; + first = false; + int64_t need_flush = _need_flush(); + if (need_flush > 0) { + auto limit = _hard_limit_reached() ? Limit::HARD : Limit::SOFT; + LOG(INFO) << "reached memtable memory " << (limit == Limit::HARD ? "hard" : "soft") + << ", " << GlobalMemoryArbitrator::process_memory_used_details_str() + << ", load mem: " << PrettyPrinter::print_bytes(_mem_tracker->consumption()) + << ", memtable writers num: " << _writers.size() + << ", active: " << PrettyPrinter::print_bytes(_active_mem_usage) + << ", queue: " << PrettyPrinter::print_bytes(_queue_mem_usage) + << ", flush: " << PrettyPrinter::print_bytes(_flush_mem_usage); + _flush_active_memtables(need_flush); } - } + } while (_hard_limit_reached()); g_memtable_memory_limit_waiting_threads << -1; - if (_soft_limit_reached()) { - LOG(INFO) << "reached memtable memory soft limit" - << " (active: " << PrettyPrinter::print_bytes(_active_mem_usage) - << ", write: " << PrettyPrinter::print_bytes(_write_mem_usage) - << ", flush: " << PrettyPrinter::print_bytes(_flush_mem_usage) << ")"; - if (_active_mem_usage >= - _write_mem_usage * config::memtable_soft_limit_active_percent / 100) { - _flush_active_memtables(_write_mem_usage / 20); - } - } timer.stop(); int64_t time_ms = timer.elapsed_time() / 1000 / 1000; g_memtable_memory_limit_latency_ms << time_ms; @@ -157,50 +158,50 @@ void MemTableMemoryLimiter::_flush_active_memtables(int64_t need_flush) { if (_active_writers.size() == 0) { return; } + + using WriterMem = std::pair, int64_t>; + auto cmp = [](WriterMem left, WriterMem right) { return left.second > right.second; }; + std::priority_queue, decltype(cmp)> heap(cmp); + + for (auto writer : _active_writers) { + auto w = writer.lock(); + if (w == nullptr) { + continue; + } + heap.emplace(w, w->active_memtable_mem_consumption()); + } + int64_t mem_flushed = 0; int64_t num_flushed = 0; - int64_t avg_mem = _active_mem_usage / _active_writers.size(); - for (auto writer : _active_writers) { - int64_t mem = _flush_memtable(writer, avg_mem); + + while (mem_flushed < need_flush && !heap.empty()) { + auto [writer, sort_mem] = heap.top(); + heap.pop(); + auto w = writer.lock(); + if (w == nullptr) { + 
continue; + } + int64_t mem = w->active_memtable_mem_consumption(); + if (mem < sort_mem * 0.9) { + // if the memtable writer just got flushed, don't flush it again + continue; + } + Status st = w->flush_async(); + if (!st.ok()) { + auto err_msg = fmt::format( + "tablet writer failed to reduce mem consumption by flushing memtable, " + "tablet_id={}, err={}", + w->tablet_id(), st.to_string()); + LOG(WARNING) << err_msg; + static_cast(w->cancel_with_status(st)); + } mem_flushed += mem; num_flushed += (mem > 0); - if (mem_flushed >= need_flush) { - break; - } } LOG(INFO) << "flushed " << num_flushed << " out of " << _active_writers.size() << " active writers, flushed size: " << PrettyPrinter::print_bytes(mem_flushed); } -int64_t MemTableMemoryLimiter::_flush_memtable(std::weak_ptr writer_to_flush, - int64_t threshold) { - auto writer = writer_to_flush.lock(); - if (!writer) { - return 0; - } - auto mem_usage = writer->active_memtable_mem_consumption(); - // if the memtable writer just got flushed, don't flush it again - if (mem_usage < threshold) { - VLOG_DEBUG << "flushing active memtables, active mem usage " - << PrettyPrinter::print_bytes(mem_usage) << " is less than " - << PrettyPrinter::print_bytes(threshold) << ", skipping"; - return 0; - } - VLOG_DEBUG << "flushing active memtables, active mem usage " - << PrettyPrinter::print_bytes(mem_usage); - Status st = writer->flush_async(); - if (!st.ok()) { - auto err_msg = fmt::format( - "tablet writer failed to reduce mem consumption by flushing memtable, " - "tablet_id={}, err={}", - writer->tablet_id(), st.to_string()); - LOG(WARNING) << err_msg; - static_cast(writer->cancel_with_status(st)); - return 0; - } - return mem_usage; -} - void MemTableMemoryLimiter::refresh_mem_tracker() { std::lock_guard l(_lock); _refresh_mem_tracker(); @@ -221,41 +222,38 @@ void MemTableMemoryLimiter::refresh_mem_tracker() { _last_limit = limit; _log_timer.reset(); - // if not exist load task, this log should not be printed. - if (_mem_usage != 0) { - LOG(INFO) << fmt::format( - "{}, {}, load mem: {}, memtable writers num: {} (active: {}, write: {}, flush: {})", - ss.str(), GlobalMemoryArbitrator::process_memory_used_details_str(), - PrettyPrinter::print_bytes(_mem_tracker->consumption()), _writers.size(), - PrettyPrinter::print_bytes(_active_mem_usage), - PrettyPrinter::print_bytes(_write_mem_usage), - PrettyPrinter::print_bytes(_flush_mem_usage)); - } + LOG(INFO) << ss.str() << ", " << GlobalMemoryArbitrator::process_memory_used_details_str() + << ", load mem: " << PrettyPrinter::print_bytes(_mem_tracker->consumption()) + << ", memtable writers num: " << _writers.size() + << ", active: " << PrettyPrinter::print_bytes(_active_mem_usage) + << ", queue: " << PrettyPrinter::print_bytes(_queue_mem_usage) + << ", flush: " << PrettyPrinter::print_bytes(_flush_mem_usage); } void MemTableMemoryLimiter::_refresh_mem_tracker() { _flush_mem_usage = 0; - _write_mem_usage = 0; + _queue_mem_usage = 0; _active_mem_usage = 0; _active_writers.clear(); for (auto it = _writers.begin(); it != _writers.end();) { if (auto writer = it->lock()) { + // The memtable is currently used by writer to insert blocks. 
auto active_usage = writer->active_memtable_mem_consumption(); _active_mem_usage += active_usage; if (active_usage > 0) { _active_writers.push_back(writer); } _flush_mem_usage += writer->mem_consumption(MemType::FLUSH); - _write_mem_usage += writer->mem_consumption(MemType::WRITE); + _queue_mem_usage += writer->mem_consumption(MemType::WRITE_FINISHED); ++it; } else { *it = std::move(_writers.back()); _writers.pop_back(); } } - _mem_usage = _flush_mem_usage + _write_mem_usage; + _mem_usage = _flush_mem_usage + _queue_mem_usage; g_memtable_active_memory.set_value(_active_mem_usage); - g_memtable_write_memory.set_value(_write_mem_usage); + g_memtable_write_memory.set_value(_queue_mem_usage); g_memtable_flush_memory.set_value(_flush_mem_usage); g_memtable_load_memory.set_value(_mem_usage); VLOG_DEBUG << "refreshed mem_tracker, num writers: " << _writers.size(); diff --git a/be/src/olap/memtable_memory_limiter.h b/be/src/olap/memtable_memory_limiter.h index 2e8271bab35c15..1e32cb165e4721 100644 --- a/be/src/olap/memtable_memory_limiter.h +++ b/be/src/olap/memtable_memory_limiter.h @@ -20,7 +20,7 @@ #include #include "common/status.h" -#include "runtime/memory/mem_tracker_limiter.h" +#include "runtime/memory/mem_tracker.h" #include "util/countdown_latch.h" #include "util/stopwatch.hpp" @@ -45,31 +45,28 @@ class MemTableMemoryLimiter { void refresh_mem_tracker(); - MemTrackerLimiter* memtable_tracker_set() { return _memtable_tracker_set.get(); } MemTracker* mem_tracker() { return _mem_tracker.get(); } int64_t mem_usage() const { return _mem_usage; } private: - static inline bool _sys_avail_mem_less_than_warning_water_mark(); - static inline bool _process_used_mem_more_than_soft_mem_limit(); + static inline int64_t _sys_avail_mem_less_than_warning_water_mark(); + static inline int64_t _process_used_mem_more_than_soft_mem_limit(); bool _soft_limit_reached(); bool _hard_limit_reached(); bool _load_usage_low(); + int64_t _need_flush(); void _flush_active_memtables(int64_t need_flush); - int64_t _flush_memtable(std::weak_ptr writer_to_flush, int64_t threshold); void _refresh_mem_tracker(); std::mutex _lock; std::condition_variable _hard_limit_end_cond; int64_t _mem_usage = 0; int64_t _flush_mem_usage = 0; - int64_t _write_mem_usage = 0; + int64_t _queue_mem_usage = 0; int64_t _active_mem_usage = 0; - // mem tracker collection of all mem tables. - std::shared_ptr _memtable_tracker_set; // sum of all mem table memory. 
std::unique_ptr _mem_tracker; int64_t _load_hard_mem_limit = -1; diff --git a/be/src/olap/memtable_writer.cpp b/be/src/olap/memtable_writer.cpp index 114a7841b92204..e8123c48eccd29 100644 --- a/be/src/olap/memtable_writer.cpp +++ b/be/src/olap/memtable_writer.cpp @@ -133,12 +133,18 @@ Status MemTableWriter::write(const vectorized::Block* block, Status MemTableWriter::_flush_memtable_async() { DCHECK(_flush_token != nullptr); - std::unique_ptr memtable; + std::shared_ptr memtable; { std::lock_guard l(_mem_table_ptr_lock); - memtable = std::move(_mem_table); + memtable = _mem_table; + _mem_table = nullptr; } - return _flush_token->submit(std::move(memtable)); + { + std::lock_guard l(_mem_table_ptr_lock); + memtable->update_mem_type(MemType::WRITE_FINISHED); + _freezed_mem_tables.push_back(memtable); + } + return _flush_token->submit(memtable); } Status MemTableWriter::flush_async() { @@ -187,35 +193,10 @@ Status MemTableWriter::wait_flush() { } void MemTableWriter::_reset_mem_table() { -#ifndef BE_TEST - auto mem_table_insert_tracker = std::make_shared( - fmt::format("MemTableManualInsert:TabletId={}:MemTableNum={}#loadID={}", - std::to_string(tablet_id()), _mem_table_num, - UniqueId(_req.load_id).to_string()), - ExecEnv::GetInstance()->memtable_memory_limiter()->memtable_tracker_set()); - auto mem_table_flush_tracker = std::make_shared( - fmt::format("MemTableHookFlush:TabletId={}:MemTableNum={}#loadID={}", - std::to_string(tablet_id()), _mem_table_num++, - UniqueId(_req.load_id).to_string()), - ExecEnv::GetInstance()->memtable_memory_limiter()->memtable_tracker_set()); -#else - auto mem_table_insert_tracker = std::make_shared(fmt::format( - "MemTableManualInsert:TabletId={}:MemTableNum={}#loadID={}", - std::to_string(tablet_id()), _mem_table_num, UniqueId(_req.load_id).to_string())); - auto mem_table_flush_tracker = std::make_shared(fmt::format( - "MemTableHookFlush:TabletId={}:MemTableNum={}#loadID={}", std::to_string(tablet_id()), - _mem_table_num++, UniqueId(_req.load_id).to_string())); -#endif - { - std::lock_guard l(_mem_table_tracker_lock); - _mem_table_insert_trackers.push_back(mem_table_insert_tracker); - _mem_table_flush_trackers.push_back(mem_table_flush_tracker); - } { std::lock_guard l(_mem_table_ptr_lock); _mem_table.reset(new MemTable(_req.tablet_id, _tablet_schema, _req.slots, _req.tuple_desc, - _unique_key_mow, _partial_update_info.get(), - mem_table_insert_tracker, mem_table_flush_tracker)); + _unique_key_mow, _partial_update_info.get())); } _segment_num++; @@ -366,15 +347,11 @@ int64_t MemTableWriter::mem_consumption(MemType mem) { } int64_t mem_usage = 0; { - std::lock_guard l(_mem_table_tracker_lock); - if ((mem & MemType::WRITE) == MemType::WRITE) { // 3 & 2 = 2 - for (const auto& mem_table_tracker : _mem_table_insert_trackers) { - mem_usage += mem_table_tracker->consumption(); - } - } - if ((mem & MemType::FLUSH) == MemType::FLUSH) { // 3 & 1 = 1 - for (const auto& mem_table_tracker : _mem_table_flush_trackers) { - mem_usage += mem_table_tracker->consumption(); + std::lock_guard l(_mem_table_ptr_lock); + for (const auto& mem_table : _freezed_mem_tables) { + auto mem_table_sptr = mem_table.lock(); + if (mem_table_sptr != nullptr && mem_table_sptr->get_mem_type() == mem) { + mem_usage += mem_table_sptr->memory_usage(); } } } diff --git a/be/src/olap/memtable_writer.h b/be/src/olap/memtable_writer.h index ee7c8e1538a19b..ec44348b4a9e11 100644 --- a/be/src/olap/memtable_writer.h +++ b/be/src/olap/memtable_writer.h @@ -57,8 +57,6 @@ namespace vectorized { class Block; } // 
namespace vectorized -enum MemType { WRITE = 1, FLUSH = 2, ALL = 3 }; - // Writer for a particular (load, index, tablet). // This class is NOT thread-safe, external synchronization is required. class MemTableWriter { @@ -123,18 +121,17 @@ class MemTableWriter { Status _cancel_status; WriteRequest _req; std::shared_ptr _rowset_writer; - std::unique_ptr _mem_table; + std::shared_ptr _mem_table; TabletSchemaSPtr _tablet_schema; bool _unique_key_mow = false; // This variable is accessed from writer thread and token flush thread // use a shared ptr to avoid use after free problem. std::shared_ptr _flush_token; - std::vector> _mem_table_insert_trackers; - std::vector> _mem_table_flush_trackers; - SpinLock _mem_table_tracker_lock; + // Save the not active memtable that is in flush queue or under flushing. + std::vector> _freezed_mem_tables; + // The lock to protect _memtable and _freezed_mem_tables structure to avoid concurrency modification or read SpinLock _mem_table_ptr_lock; - std::atomic _mem_table_num = 1; QueryThreadContext _query_thread_context; std::mutex _lock; diff --git a/be/src/olap/merger.cpp b/be/src/olap/merger.cpp index cba828785d90ec..ab034123ac883c 100644 --- a/be/src/olap/merger.cpp +++ b/be/src/olap/merger.cpp @@ -57,30 +57,6 @@ #include "vec/olap/vertical_merge_iterator.h" namespace doris { -namespace { - -// for mow with cluster key table, the key group also contains cluster key columns. -// the `key_group_cluster_key_idxes` marks the positions of cluster key columns in key group. -void _generate_key_group_cluster_key_idxes(const TabletSchema& tablet_schema, - std::vector>& column_groups, - std::vector& key_group_cluster_key_idxes) { - if (column_groups.empty() || tablet_schema.cluster_key_idxes().empty()) { - return; - } - - auto& key_column_group = column_groups[0]; - for (const auto& index_in_tablet_schema : tablet_schema.cluster_key_idxes()) { - for (auto j = 0; j < key_column_group.size(); ++j) { - auto cid = key_column_group[j]; - if (cid == index_in_tablet_schema) { - key_group_cluster_key_idxes.emplace_back(j); - break; - } - } - } -} - -} // namespace Status Merger::vmerge_rowsets(BaseTabletSPtr tablet, ReaderType reader_type, const TabletSchema& cur_tablet_schema, @@ -183,7 +159,8 @@ Status Merger::vmerge_rowsets(BaseTabletSPtr tablet, ReaderType reader_type, // split columns into several groups, make sure all keys in one group // unique_key should consider sequence&delete column void Merger::vertical_split_columns(const TabletSchema& tablet_schema, - std::vector>* column_groups) { + std::vector>* column_groups, + std::vector* key_group_cluster_key_idxes) { uint32_t num_key_cols = tablet_schema.num_key_columns(); uint32_t total_cols = tablet_schema.num_columns(); std::vector key_columns; @@ -206,8 +183,24 @@ void Merger::vertical_split_columns(const TabletSchema& tablet_schema, } if (!tablet_schema.cluster_key_idxes().empty()) { for (const auto& cid : tablet_schema.cluster_key_idxes()) { - if (cid >= num_key_cols) { - key_columns.emplace_back(cid); + auto idx = tablet_schema.field_index(cid); + DCHECK(idx >= 0) << "could not find cluster key column with unique_id=" << cid + << " in tablet schema, table_id=" << tablet_schema.table_id(); + if (idx >= num_key_cols) { + key_columns.emplace_back(idx); + } + } + // tablet schema unique ids: [1, 2, 5, 3, 6, 4], [1 2] is key columns + // cluster key unique ids: [3, 1, 4] + // the key_columns should be [0, 1, 3, 5] + // the key_group_cluster_key_idxes should be [2, 1, 3] + for (const auto& cid : 
tablet_schema.cluster_key_idxes()) { + auto idx = tablet_schema.field_index(cid); + for (auto i = 0; i < key_columns.size(); ++i) { + if (idx == key_columns[i]) { + key_group_cluster_key_idxes->emplace_back(i); + break; + } } } } @@ -218,14 +211,12 @@ void Merger::vertical_split_columns(const TabletSchema& tablet_schema, if (!key_columns.empty()) { column_groups->emplace_back(std::move(key_columns)); } - auto&& cluster_key_idxes = tablet_schema.cluster_key_idxes(); std::vector value_columns; for (uint32_t i = num_key_cols; i < total_cols; ++i) { if (i == sequence_col_idx || i == delete_sign_idx || - cluster_key_idxes.end() != - std::find(cluster_key_idxes.begin(), cluster_key_idxes.end(), i)) { + key_columns.end() != std::find(key_columns.begin(), key_columns.end(), i)) { continue; } @@ -460,11 +451,8 @@ Status Merger::vertical_merge_rowsets(BaseTabletSPtr tablet, ReaderType reader_t int64_t merge_way_num, Statistics* stats_output) { LOG(INFO) << "Start to do vertical compaction, tablet_id: " << tablet->tablet_id(); std::vector> column_groups; - vertical_split_columns(tablet_schema, &column_groups); - std::vector key_group_cluster_key_idxes; - _generate_key_group_cluster_key_idxes(tablet_schema, column_groups, - key_group_cluster_key_idxes); + vertical_split_columns(tablet_schema, &column_groups, &key_group_cluster_key_idxes); vectorized::RowSourcesBuffer row_sources_buf( tablet->tablet_id(), dst_rowset_writer->context().tablet_path, reader_type); diff --git a/be/src/olap/merger.h b/be/src/olap/merger.h index cb05162b3bc9a1..7d430cde7f33c3 100644 --- a/be/src/olap/merger.h +++ b/be/src/olap/merger.h @@ -66,7 +66,8 @@ class Merger { // for vertical compaction static void vertical_split_columns(const TabletSchema& tablet_schema, - std::vector>* column_groups); + std::vector>* column_groups, + std::vector* key_group_cluster_key_idxes); static Status vertical_compact_one_group( BaseTabletSPtr tablet, ReaderType reader_type, const TabletSchema& tablet_schema, bool is_key, const std::vector& column_group, diff --git a/be/src/olap/page_cache.h b/be/src/olap/page_cache.h index 09fc689959ce4c..32b6683e7823b0 100644 --- a/be/src/olap/page_cache.h +++ b/be/src/olap/page_cache.h @@ -92,28 +92,28 @@ class StoragePageCache { } }; - class DataPageCache : public LRUCachePolicyTrackingAllocator { + class DataPageCache : public LRUCachePolicy { public: DataPageCache(size_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingAllocator( - CachePolicy::CacheType::DATA_PAGE_CACHE, capacity, LRUCacheType::SIZE, - config::data_page_cache_stale_sweep_time_sec, num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::DATA_PAGE_CACHE, capacity, + LRUCacheType::SIZE, config::data_page_cache_stale_sweep_time_sec, + num_shards) {} }; - class IndexPageCache : public LRUCachePolicyTrackingAllocator { + class IndexPageCache : public LRUCachePolicy { public: IndexPageCache(size_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingAllocator( - CachePolicy::CacheType::INDEXPAGE_CACHE, capacity, LRUCacheType::SIZE, - config::index_page_cache_stale_sweep_time_sec, num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::INDEXPAGE_CACHE, capacity, + LRUCacheType::SIZE, config::index_page_cache_stale_sweep_time_sec, + num_shards) {} }; - class PKIndexPageCache : public LRUCachePolicyTrackingAllocator { + class PKIndexPageCache : public LRUCachePolicy { public: PKIndexPageCache(size_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingAllocator( - CachePolicy::CacheType::PK_INDEX_PAGE_CACHE, capacity, 
LRUCacheType::SIZE, - config::pk_index_page_cache_stale_sweep_time_sec, num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::PK_INDEX_PAGE_CACHE, capacity, + LRUCacheType::SIZE, + config::pk_index_page_cache_stale_sweep_time_sec, num_shards) {} }; static constexpr uint32_t kDefaultNumShards = 16; @@ -164,7 +164,7 @@ class StoragePageCache { // delete bitmap in unique key with mow std::unique_ptr _pk_index_page_cache; - LRUCachePolicyTrackingAllocator* _get_page_cache(segment_v2::PageTypePB page_type) { + LRUCachePolicy* _get_page_cache(segment_v2::PageTypePB page_type) { switch (page_type) { case segment_v2::DATA_PAGE: { return _data_page_cache.get(); diff --git a/be/src/olap/partial_update_info.cpp b/be/src/olap/partial_update_info.cpp index 6a6ec9deabf5b5..b8e528e99e10aa 100644 --- a/be/src/olap/partial_update_info.cpp +++ b/be/src/olap/partial_update_info.cpp @@ -23,8 +23,10 @@ #include "olap/olap_common.h" #include "olap/rowset/rowset.h" #include "olap/rowset/rowset_writer_context.h" +#include "olap/tablet_meta.h" #include "olap/tablet_schema.h" #include "olap/utils.h" +#include "util/bitmap_value.h" #include "vec/common/assert_cast.h" #include "vec/core/block.h" @@ -32,12 +34,14 @@ namespace doris { void PartialUpdateInfo::init(const TabletSchema& tablet_schema, bool partial_update, const std::set& partial_update_cols, bool is_strict_mode, - int64_t timestamp_ms, const std::string& timezone, - const std::string& auto_increment_column, int64_t cur_max_version) { + int64_t timestamp_ms, int32_t nano_seconds, + const std::string& timezone, const std::string& auto_increment_column, + int64_t cur_max_version) { is_partial_update = partial_update; partial_update_input_columns = partial_update_cols; max_version_in_flush_phase = cur_max_version; this->timestamp_ms = timestamp_ms; + this->nano_seconds = nano_seconds; this->timezone = timezone; missing_cids.clear(); update_cids.clear(); @@ -78,6 +82,7 @@ void PartialUpdateInfo::to_pb(PartialUpdateInfoPB* partial_update_info_pb) const can_insert_new_rows_in_partial_update); partial_update_info_pb->set_is_strict_mode(is_strict_mode); partial_update_info_pb->set_timestamp_ms(timestamp_ms); + partial_update_info_pb->set_nano_seconds(nano_seconds); partial_update_info_pb->set_timezone(timezone); partial_update_info_pb->set_is_input_columns_contains_auto_inc_column( is_input_columns_contains_auto_inc_column); @@ -114,6 +119,9 @@ void PartialUpdateInfo::from_pb(PartialUpdateInfoPB* partial_update_info_pb) { partial_update_info_pb->is_input_columns_contains_auto_inc_column(); is_schema_contains_auto_inc_column = partial_update_info_pb->is_schema_contains_auto_inc_column(); + if (partial_update_info_pb->has_nano_seconds()) { + nano_seconds = partial_update_info_pb->nano_seconds(); + } default_values.clear(); for (const auto& value : partial_update_info_pb->default_values()) { default_values.push_back(value); @@ -156,15 +164,28 @@ void PartialUpdateInfo::_generate_default_values_for_missing_cids( if (UNLIKELY(column.type() == FieldType::OLAP_FIELD_TYPE_DATETIMEV2 && to_lower(column.default_value()).find(to_lower("CURRENT_TIMESTAMP")) != std::string::npos)) { - DateV2Value dtv; - dtv.from_unixtime(timestamp_ms / 1000, timezone); - default_value = dtv.debug_string(); + auto pos = to_lower(column.default_value()).find('('); + if (pos == std::string::npos) { + DateV2Value dtv; + dtv.from_unixtime(timestamp_ms / 1000, timezone); + default_value = dtv.debug_string(); + } else { + int precision = std::stoi(column.default_value().substr(pos + 1)); + 
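// Illustration (hypothetical default value): for a column default such as
// "CURRENT_TIMESTAMP(3)", pos points at '(' and std::stoi over the remaining
// "3)" yields precision 3; the fractional seconds of the generated value then
// come from the newly added nano_seconds field via from_unixtime() below.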
DateV2Value dtv; + dtv.from_unixtime(timestamp_ms / 1000, nano_seconds, timezone, precision); + default_value = dtv.debug_string(); + } } else if (UNLIKELY(column.type() == FieldType::OLAP_FIELD_TYPE_DATEV2 && to_lower(column.default_value()).find(to_lower("CURRENT_DATE")) != std::string::npos)) { DateV2Value dv; dv.from_unixtime(timestamp_ms / 1000, timezone); default_value = dv.debug_string(); + } else if (UNLIKELY(column.type() == FieldType::OLAP_FIELD_TYPE_OBJECT && + to_lower(column.default_value()).find(to_lower("BITMAP_EMPTY")) != + std::string::npos)) { + BitmapValue v = BitmapValue {}; + default_value = v.to_string(); } else { default_value = column.default_value(); } diff --git a/be/src/olap/partial_update_info.h b/be/src/olap/partial_update_info.h index 3366c414cf03ff..278b027942eb20 100644 --- a/be/src/olap/partial_update_info.h +++ b/be/src/olap/partial_update_info.h @@ -39,7 +39,7 @@ struct RowsetId; struct PartialUpdateInfo { void init(const TabletSchema& tablet_schema, bool partial_update, const std::set& partial_update_cols, bool is_strict_mode, - int64_t timestamp_ms, const std::string& timezone, + int64_t timestamp_ms, int32_t nano_seconds, const std::string& timezone, const std::string& auto_increment_column, int64_t cur_max_version = -1); void to_pb(PartialUpdateInfoPB* partial_update_info) const; void from_pb(PartialUpdateInfoPB* partial_update_info); @@ -60,6 +60,7 @@ struct PartialUpdateInfo { bool can_insert_new_rows_in_partial_update {true}; bool is_strict_mode {false}; int64_t timestamp_ms {0}; + int32_t nano_seconds {0}; std::string timezone; bool is_input_columns_contains_auto_inc_column = false; bool is_schema_contains_auto_inc_column = false; diff --git a/be/src/olap/rowset/beta_rowset_writer.cpp b/be/src/olap/rowset/beta_rowset_writer.cpp index 45f260bdfa15ef..c469d291f871fa 100644 --- a/be/src/olap/rowset/beta_rowset_writer.cpp +++ b/be/src/olap/rowset/beta_rowset_writer.cpp @@ -282,7 +282,8 @@ Status BaseBetaRowsetWriter::_generate_delete_bitmap(int32_t segment_id) { LOG(INFO) << "[Memtable Flush] construct delete bitmap tablet: " << _context.tablet->tablet_id() << ", rowset_ids: " << _context.mow_context->rowset_ids.size() << ", cur max_version: " << _context.mow_context->max_version - << ", transaction_id: " << _context.mow_context->txn_id + << ", transaction_id: " << _context.mow_context->txn_id << ", delete_bitmap_count: " + << _context.tablet->tablet_meta()->delete_bitmap().get_delete_bitmap_count() << ", cost: " << watch.get_elapse_time_us() << "(us), total rows: " << total_rows; return Status::OK(); } @@ -514,26 +515,27 @@ Status BetaRowsetWriter::_rename_compacted_indices(int64_t begin, int64_t end, u return Status::OK(); } -// return true if there isn't any flying segcompaction, otherwise return false -bool BetaRowsetWriter::_check_and_set_is_doing_segcompaction() { - return !_is_doing_segcompaction.exchange(true); -} - Status BetaRowsetWriter::_segcompaction_if_necessary() { Status status = Status::OK(); - // leave _check_and_set_is_doing_segcompaction as the last condition - // otherwise _segcompacting_cond will never get notified + // if not doing segcompaction, just check segment number if (!config::enable_segcompaction || !_context.enable_segcompaction || !_context.tablet_schema->cluster_key_idxes().empty() || - _context.tablet_schema->num_variant_columns() > 0 || - !_check_and_set_is_doing_segcompaction()) { + _context.tablet_schema->num_variant_columns() > 0) { + return _check_segment_number_limit(_num_segment); + } + // leave 
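Editor's note: for the CURRENT_TIMESTAMP default handling added above, a small illustrative sketch of how the optional precision can be pulled out of a default value such as "current_timestamp(3)", using the same find('(') / std::stoi approach as the patch; the function name is hypothetical.

#include <optional>
#include <string>

// Returns the datetime precision embedded in a CURRENT_TIMESTAMP default value,
// or nullopt when none is given (plain second precision).
std::optional<int> parse_current_timestamp_precision(const std::string& default_value) {
    auto pos = default_value.find('(');
    if (pos == std::string::npos) {
        return std::nullopt;
    }
    // std::stoi stops at the first non-digit character, so the trailing ')' is ignored.
    return std::stoi(default_value.substr(pos + 1));
}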
_is_doing_segcompaction as the last condition + // otherwise _segcompacting_cond will never get notified + if (_is_doing_segcompaction.exchange(true)) { return status; } if (_segcompaction_status.load() != OK) { status = Status::Error( "BetaRowsetWriter::_segcompaction_if_necessary meet invalid state, error code: {}", _segcompaction_status.load()); - } else if ((_num_segment - _segcompacted_point) >= config::segcompaction_batch_size) { + } else { + status = _check_segment_number_limit(_num_segcompacted); + } + if (status.ok() && (_num_segment - _segcompacted_point) >= config::segcompaction_batch_size) { SegCompactionCandidatesSharedPtr segments; status = _find_longest_consecutive_small_segment(segments); if (LIKELY(status.ok()) && (!segments->empty())) { @@ -719,7 +721,8 @@ Status BetaRowsetWriter::_close_file_writers() { Status BetaRowsetWriter::build(RowsetSharedPtr& rowset) { RETURN_IF_ERROR(_close_file_writers()); - RETURN_NOT_OK_STATUS_WITH_WARN(_check_segment_number_limit(), + const auto total_segment_num = _num_segment - _segcompacted_point + 1 + _num_segcompacted; + RETURN_NOT_OK_STATUS_WITH_WARN(_check_segment_number_limit(total_segment_num), "too many segments when build new rowset"); RETURN_IF_ERROR(_build_rowset_meta(_rowset_meta.get(), true)); if (_is_pending) { @@ -908,11 +911,10 @@ Status BetaRowsetWriter::_create_segment_writer_for_segcompaction( return Status::OK(); } -Status BaseBetaRowsetWriter::_check_segment_number_limit() { - size_t total_segment_num = _num_segment + 1; +Status BaseBetaRowsetWriter::_check_segment_number_limit(size_t segnum) { DBUG_EXECUTE_IF("BetaRowsetWriter._check_segment_number_limit_too_many_segments", - { total_segment_num = dp->param("segnum", 1024); }); - if (UNLIKELY(total_segment_num > config::max_segment_num_per_rowset)) { + { segnum = dp->param("segnum", 1024); }); + if (UNLIKELY(segnum > config::max_segment_num_per_rowset)) { return Status::Error( "too many segments in rowset. tablet_id:{}, rowset_id:{}, max:{}, " "_num_segment:{}, rowset_num_rows:{}", @@ -922,11 +924,10 @@ Status BaseBetaRowsetWriter::_check_segment_number_limit() { return Status::OK(); } -Status BetaRowsetWriter::_check_segment_number_limit() { - size_t total_segment_num = _num_segment - _segcompacted_point + 1 + _num_segcompacted; +Status BetaRowsetWriter::_check_segment_number_limit(size_t segnum) { DBUG_EXECUTE_IF("BetaRowsetWriter._check_segment_number_limit_too_many_segments", - { total_segment_num = dp->param("segnum", 1024); }); - if (UNLIKELY(total_segment_num > config::max_segment_num_per_rowset)) { + { segnum = dp->param("segnum", 1024); }); + if (UNLIKELY(segnum > config::max_segment_num_per_rowset)) { return Status::Error( "too many segments in rowset. 
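Editor's note: the refactor above turns the segment-number guard into a check parameterized on the segment count, so each call site can pass the number it actually cares about. A stripped-down sketch of that check; the constant stands in for config::max_segment_num_per_rowset and the real code returns a Status instead of a bool.

#include <cstddef>

constexpr size_t kMaxSegmentNumPerRowset = 1000; // stand-in for config::max_segment_num_per_rowset

// Returns true when a rowset would hold too many segments.
bool segment_limit_exceeded(size_t segnum) {
    return segnum > kMaxSegmentNumPerRowset;
}

// Counts passed by the patched call sites (member names abbreviated):
//   segcompaction disabled or not applicable:  num_segment
//   segcompaction in progress:                 num_segcompacted
//   build():                                   num_segment - segcompacted_point + 1 + num_segcompacted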
tablet_id:{}, rowset_id:{}, max:{}, _num_segment:{}, " "_segcompacted_point:{}, _num_segcompacted:{}, rowset_num_rows:{}", diff --git a/be/src/olap/rowset/beta_rowset_writer.h b/be/src/olap/rowset/beta_rowset_writer.h index a7ec8fe87e9017..a83cf720d95e37 100644 --- a/be/src/olap/rowset/beta_rowset_writer.h +++ b/be/src/olap/rowset/beta_rowset_writer.h @@ -226,7 +226,7 @@ class BaseBetaRowsetWriter : public RowsetWriter { Status _build_rowset_meta(RowsetMeta* rowset_meta, bool check_segment_num = false); Status _create_file_writer(const std::string& path, io::FileWriterPtr& file_writer); virtual Status _close_file_writers(); - virtual Status _check_segment_number_limit(); + virtual Status _check_segment_number_limit(size_t segnum); virtual int64_t _num_seg() const; // build a tmp rowset for load segment to calc delete_bitmap for this segment Status _build_tmp(RowsetSharedPtr& rowset_ptr); @@ -298,7 +298,7 @@ class BetaRowsetWriter : public BaseBetaRowsetWriter { // segment compaction friend class SegcompactionWorker; Status _close_file_writers() override; - Status _check_segment_number_limit() override; + Status _check_segment_number_limit(size_t segnum) override; int64_t _num_seg() const override; Status _wait_flying_segcompaction(); Status _create_segment_writer_for_segcompaction( @@ -307,7 +307,6 @@ class BetaRowsetWriter : public BaseBetaRowsetWriter { Status _segcompaction_rename_last_segments(); Status _load_noncompacted_segment(segment_v2::SegmentSharedPtr& segment, int32_t segment_id); Status _find_longest_consecutive_small_segment(SegCompactionCandidatesSharedPtr& segments); - bool _check_and_set_is_doing_segcompaction(); Status _rename_compacted_segments(int64_t begin, int64_t end); Status _rename_compacted_segment_plain(uint64_t seg_id); Status _rename_compacted_indices(int64_t begin, int64_t end, uint64_t seg_id); diff --git a/be/src/olap/rowset/rowset_meta.cpp b/be/src/olap/rowset/rowset_meta.cpp index 2bc5a6cef858f2..f053ad26d7efb9 100644 --- a/be/src/olap/rowset/rowset_meta.cpp +++ b/be/src/olap/rowset/rowset_meta.cpp @@ -223,6 +223,7 @@ void RowsetMeta::merge_rowset_meta(const RowsetMeta& other) { set_num_segments(num_segments() + other.num_segments()); set_num_rows(num_rows() + other.num_rows()); set_data_disk_size(data_disk_size() + other.data_disk_size()); + set_total_disk_size(total_disk_size() + other.total_disk_size()); set_index_disk_size(index_disk_size() + other.index_disk_size()); for (auto&& key_bound : other.get_segments_key_bounds()) { add_segment_key_bounds(key_bound); diff --git a/be/src/olap/rowset/rowset_meta_manager.cpp b/be/src/olap/rowset/rowset_meta_manager.cpp index 9d1cbd8858983b..454eb1ac643af9 100644 --- a/be/src/olap/rowset/rowset_meta_manager.cpp +++ b/be/src/olap/rowset/rowset_meta_manager.cpp @@ -357,6 +357,69 @@ Status RowsetMetaManager::_get_rowset_binlog_metas(OlapMeta* meta, const TabletU return status; } +Status RowsetMetaManager::get_rowset_binlog_metas(OlapMeta* meta, TabletUid tablet_uid, + Version version, RowsetBinlogMetasPB* metas_pb) { + Status status; + auto tablet_uid_str = tablet_uid.to_string(); + auto prefix_key = make_binlog_meta_key_prefix(tablet_uid); + auto begin_key = make_binlog_meta_key_prefix(tablet_uid, version.first); + auto end_key = make_binlog_meta_key_prefix(tablet_uid, version.second + 1); + auto traverse_func = [meta, metas_pb, &status, &tablet_uid_str, &end_key]( + std::string_view key, std::string_view value) -> bool { + VLOG_DEBUG << fmt::format("get rowset binlog metas, key={}, value={}", key, value); + if 
(key.compare(end_key) > 0) { // the binlog meta key is binary comparable. + // All binlog meta has been scanned + return false; + } + + if (!starts_with_binlog_meta(key)) { + auto err_msg = fmt::format("invalid binlog meta key:{}", key); + status = Status::InternalError(err_msg); + LOG(WARNING) << err_msg; + return false; + } + + BinlogMetaEntryPB binlog_meta_entry_pb; + if (!binlog_meta_entry_pb.ParseFromArray(value.data(), value.size())) { + auto err_msg = fmt::format("fail to parse binlog meta value:{}", value); + status = Status::InternalError(err_msg); + LOG(WARNING) << err_msg; + return false; + } + + const auto& rowset_id = binlog_meta_entry_pb.rowset_id_v2(); + auto* binlog_meta_pb = metas_pb->add_rowset_binlog_metas(); + binlog_meta_pb->set_rowset_id(rowset_id); + binlog_meta_pb->set_version(binlog_meta_entry_pb.version()); + binlog_meta_pb->set_num_segments(binlog_meta_entry_pb.num_segments()); + binlog_meta_pb->set_meta_key(std::string {key}); + binlog_meta_pb->set_meta(std::string {value}); + + auto binlog_data_key = + make_binlog_data_key(tablet_uid_str, binlog_meta_entry_pb.version(), rowset_id); + std::string binlog_data; + status = meta->get(META_COLUMN_FAMILY_INDEX, binlog_data_key, &binlog_data); + if (!status.ok()) { + LOG(WARNING) << status.to_string(); + return false; + } + binlog_meta_pb->set_data_key(binlog_data_key); + binlog_meta_pb->set_data(binlog_data); + + return true; + }; + + Status iterStatus = + meta->iterate(META_COLUMN_FAMILY_INDEX, begin_key, prefix_key, traverse_func); + if (!iterStatus.ok()) { + LOG(WARNING) << fmt::format( + "fail to iterate binlog meta. prefix_key:{}, version:{}, status:{}", prefix_key, + version.to_string(), iterStatus.to_string()); + return iterStatus; + } + return status; +} + Status RowsetMetaManager::_get_all_rowset_binlog_metas(OlapMeta* meta, const TabletUid tablet_uid, RowsetBinlogMetasPB* metas_pb) { Status status; diff --git a/be/src/olap/rowset/rowset_meta_manager.h b/be/src/olap/rowset/rowset_meta_manager.h index b61e8c0276949f..eb04128fdedaf2 100644 --- a/be/src/olap/rowset/rowset_meta_manager.h +++ b/be/src/olap/rowset/rowset_meta_manager.h @@ -72,6 +72,9 @@ class RowsetMetaManager { static Status get_rowset_binlog_metas(OlapMeta* meta, const TabletUid tablet_uid, const std::vector& binlog_versions, RowsetBinlogMetasPB* metas_pb); + // get all binlog metas of a tablet in version. 
+ static Status get_rowset_binlog_metas(OlapMeta* meta, const TabletUid tablet_uid, + Version version, RowsetBinlogMetasPB* metas_pb); static Status remove_binlog(OlapMeta* meta, const std::string& suffix); static Status ingest_binlog_metas(OlapMeta* meta, TabletUid tablet_uid, RowsetBinlogMetasPB* metas_pb); diff --git a/be/src/olap/rowset/segcompaction.cpp b/be/src/olap/rowset/segcompaction.cpp index 374056f7b9dd96..fc8baf952c1863 100644 --- a/be/src/olap/rowset/segcompaction.cpp +++ b/be/src/olap/rowset/segcompaction.cpp @@ -248,7 +248,9 @@ Status SegcompactionWorker::_do_compact_segments(SegCompactionCandidatesSharedPt } std::vector> column_groups; - Merger::vertical_split_columns(*ctx.tablet_schema, &column_groups); + std::vector key_group_cluster_key_idxes; + Merger::vertical_split_columns(*ctx.tablet_schema, &column_groups, + &key_group_cluster_key_idxes); vectorized::RowSourcesBuffer row_sources_buf(tablet->tablet_id(), tablet->tablet_path(), ReaderType::READER_SEGMENT_COMPACTION); diff --git a/be/src/olap/rowset/segment_v2/column_reader.cpp b/be/src/olap/rowset/segment_v2/column_reader.cpp index 6faefc34142e5f..3c9b5b7ce7e5ab 100644 --- a/be/src/olap/rowset/segment_v2/column_reader.cpp +++ b/be/src/olap/rowset/segment_v2/column_reader.cpp @@ -57,6 +57,7 @@ #include "olap/types.h" // for TypeInfo #include "olap/wrapper_field.h" #include "runtime/decimalv2_value.h" +#include "runtime/define_primitive_type.h" #include "util/binary_cast.hpp" #include "util/bitmap.h" #include "util/block_compression.h" @@ -120,6 +121,7 @@ Status ColumnReader::create_array(const ColumnReaderOptions& opts, const ColumnM if (meta.is_nullable()) { array_reader->_sub_readers[2] = std::move(null_reader); } + array_reader->_meta_type = FieldType::OLAP_FIELD_TYPE_ARRAY; *reader = std::move(array_reader); return Status::OK(); } @@ -160,6 +162,7 @@ Status ColumnReader::create_map(const ColumnReaderOptions& opts, const ColumnMet if (meta.is_nullable()) { map_reader->_sub_readers[3] = std::move(null_reader); } + map_reader->_meta_type = FieldType::OLAP_FIELD_TYPE_MAP; *reader = std::move(map_reader); return Status::OK(); } @@ -180,6 +183,7 @@ Status ColumnReader::create_struct(const ColumnReaderOptions& opts, const Column &sub_reader)); struct_reader->_sub_readers.push_back(std::move(sub_reader)); } + struct_reader->_meta_type = FieldType::OLAP_FIELD_TYPE_STRUCT; *reader = std::move(struct_reader); return Status::OK(); } @@ -197,7 +201,7 @@ Status ColumnReader::create_agg_state(const ColumnReaderOptions& opts, const Col auto data_type = vectorized::DataTypeFactory::instance().create_data_type(meta); const auto* agg_state_type = assert_cast(data_type.get()); - agg_state_type->check_agg_state_compatibility(opts.be_exec_version); + agg_state_type->check_function_compatibility(opts.be_exec_version); auto type = agg_state_type->get_serialized_type()->get_type_as_type_descriptor().type; if (read_as_string(type)) { @@ -206,9 +210,16 @@ Status ColumnReader::create_agg_state(const ColumnReaderOptions& opts, const Col RETURN_IF_ERROR(reader_local->init(&meta)); *reader = std::move(reader_local); return Status::OK(); + } else if (type == PrimitiveType::TYPE_MAP) { + return create_map(opts, meta, file_reader, reader); + } else if (type == PrimitiveType::TYPE_ARRAY) { + return create_array(opts, meta, file_reader, reader); + } else if (type == PrimitiveType::TYPE_STRUCT) { + return create_struct(opts, meta, num_rows, file_reader, reader); } - return Status::InternalError("Not supported"); + return Status::InternalError("Not 
supported type: {}, serialized type: {}", + agg_state_type->get_name(), int(type)); } Status ColumnReader::create(const ColumnReaderOptions& opts, const ColumnMetaPB& meta, @@ -591,6 +602,9 @@ Status ColumnReader::get_row_ranges_by_bloom_filter(const AndBlockColumnPredicat } Status ColumnReader::_load_ordinal_index(bool use_page_cache, bool kept_in_memory) { + if (!_ordinal_index) { + return Status::InternalError("ordinal_index not inited"); + } return _ordinal_index->load(use_page_cache, kept_in_memory); } diff --git a/be/src/olap/rowset/segment_v2/inverted_index/analyzer/analyzer.cpp b/be/src/olap/rowset/segment_v2/inverted_index/analyzer/analyzer.cpp new file mode 100644 index 00000000000000..8ad1abb322f01f --- /dev/null +++ b/be/src/olap/rowset/segment_v2/inverted_index/analyzer/analyzer.cpp @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h" + +#include "CLucene.h" +#include "CLucene/analysis/LanguageBasedAnalyzer.h" + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wshadow-field" +#endif +#include "CLucene/analysis/standard95/StandardAnalyzer.h" +#ifdef __clang__ +#pragma clang diagnostic pop +#endif +#include "olap/rowset/segment_v2/inverted_index/char_filter/char_filter_factory.h" + +namespace doris::segment_v2::inverted_index { + +std::unique_ptr InvertedIndexAnalyzer::create_reader( + CharFilterMap& char_filter_map) { + std::unique_ptr reader = + std::make_unique>(); + if (!char_filter_map.empty()) { + reader = std::unique_ptr(CharFilterFactory::create( + char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_TYPE], reader.release(), + char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_PATTERN], + char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_REPLACEMENT])); + } + return reader; +} + +std::unique_ptr InvertedIndexAnalyzer::create_analyzer( + const InvertedIndexCtx* inverted_index_ctx) { + std::unique_ptr analyzer; + auto analyser_type = inverted_index_ctx->parser_type; + if (analyser_type == InvertedIndexParserType::PARSER_STANDARD || + analyser_type == InvertedIndexParserType::PARSER_UNICODE) { + analyzer = std::make_unique(); + } else if (analyser_type == InvertedIndexParserType::PARSER_ENGLISH) { + analyzer = std::make_unique>(); + } else if (analyser_type == InvertedIndexParserType::PARSER_CHINESE) { + auto chinese_analyzer = + std::make_unique(L"chinese", false); + chinese_analyzer->initDict(config::inverted_index_dict_path); + auto mode = inverted_index_ctx->parser_mode; + if (mode == INVERTED_INDEX_PARSER_COARSE_GRANULARITY) { + chinese_analyzer->setMode(lucene::analysis::AnalyzerMode::Default); + } else { + chinese_analyzer->setMode(lucene::analysis::AnalyzerMode::All); + } + analyzer = 
std::move(chinese_analyzer); + } else { + // default + analyzer = std::make_unique>(); + } + // set lowercase + auto lowercase = inverted_index_ctx->lower_case; + if (lowercase == INVERTED_INDEX_PARSER_TRUE) { + analyzer->set_lowercase(true); + } else if (lowercase == INVERTED_INDEX_PARSER_FALSE) { + analyzer->set_lowercase(false); + } + // set stop words + auto stop_words = inverted_index_ctx->stop_words; + if (stop_words == "none") { + analyzer->set_stopwords(nullptr); + } else { + analyzer->set_stopwords(&lucene::analysis::standard95::stop_words); + } + return analyzer; +} + +std::vector InvertedIndexAnalyzer::get_analyse_result( + lucene::util::Reader* reader, lucene::analysis::Analyzer* analyzer, + const std::string& field_name, InvertedIndexQueryType query_type, bool drop_duplicates) { + std::vector analyse_result; + + std::wstring field_ws = StringUtil::string_to_wstring(field_name); + std::unique_ptr token_stream( + analyzer->tokenStream(field_ws.c_str(), reader)); + + lucene::analysis::Token token; + + while (token_stream->next(&token)) { + if (token.termLength() != 0) { + analyse_result.emplace_back(token.termBuffer(), token.termLength()); + } + } + + if (token_stream != nullptr) { + token_stream->close(); + } + + if (drop_duplicates && (query_type == InvertedIndexQueryType::MATCH_ANY_QUERY || + query_type == InvertedIndexQueryType::MATCH_ALL_QUERY)) { + std::set unrepeated_result(analyse_result.begin(), analyse_result.end()); + analyse_result.assign(unrepeated_result.begin(), unrepeated_result.end()); + } + return analyse_result; +} + +} // namespace doris::segment_v2::inverted_index diff --git a/be/src/olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h b/be/src/olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h new file mode 100644 index 00000000000000..ad5d71a536420d --- /dev/null +++ b/be/src/olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "olap/inverted_index_parser.h" +#include "olap/rowset/segment_v2/inverted_index_query_type.h" + +namespace lucene { +namespace util { +class Reader; +} +namespace analysis { +class Analyzer; +} +} // namespace lucene + +namespace doris::segment_v2::inverted_index { +class InvertedIndexAnalyzer { +public: + static std::unique_ptr create_reader(CharFilterMap& char_filter_map); + + static std::unique_ptr create_analyzer( + const InvertedIndexCtx* inverted_index_ctx); + + static std::vector get_analyse_result(lucene::util::Reader* reader, + lucene::analysis::Analyzer* analyzer, + const std::string& field_name, + InvertedIndexQueryType query_type, + bool drop_duplicates = true); +}; +} // namespace doris::segment_v2::inverted_index \ No newline at end of file diff --git a/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp b/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp index b2930d2867b05f..e42c02860f5d00 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp +++ b/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp @@ -135,9 +135,9 @@ void InvertedIndexQueryCache::insert(const CacheKey& key, std::shared_ptrgetSizeInBytes(), - bitmap->getSizeInBytes(), CachePriority::NORMAL); + auto* lru_handle = LRUCachePolicy::insert(key.encode(), (void*)cache_value_ptr.release(), + bitmap->getSizeInBytes(), bitmap->getSizeInBytes(), + CachePriority::NORMAL); *handle = InvertedIndexQueryCacheHandle(this, lru_handle); } diff --git a/be/src/olap/rowset/segment_v2/inverted_index_cache.h b/be/src/olap/rowset/segment_v2/inverted_index_cache.h index 5423ea044a2e58..b80f2c01027b6e 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_cache.h +++ b/be/src/olap/rowset/segment_v2/inverted_index_cache.h @@ -99,23 +99,23 @@ class InvertedIndexSearcherCache { private: InvertedIndexSearcherCache() = default; - class InvertedIndexSearcherCachePolicy : public LRUCachePolicyTrackingManual { + class InvertedIndexSearcherCachePolicy : public LRUCachePolicy { public: InvertedIndexSearcherCachePolicy(size_t capacity, uint32_t num_shards, uint32_t element_count_capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::INVERTEDINDEX_SEARCHER_CACHE, - capacity, LRUCacheType::SIZE, - config::inverted_index_cache_stale_sweep_time_sec, - num_shards, element_count_capacity, true) {} + : LRUCachePolicy(CachePolicy::CacheType::INVERTEDINDEX_SEARCHER_CACHE, capacity, + LRUCacheType::SIZE, + config::inverted_index_cache_stale_sweep_time_sec, num_shards, + element_count_capacity, true) {} InvertedIndexSearcherCachePolicy(size_t capacity, uint32_t num_shards, uint32_t element_count_capacity, CacheValueTimeExtractor cache_value_time_extractor, bool cache_value_check_timestamp) - : LRUCachePolicyTrackingManual( - CachePolicy::CacheType::INVERTEDINDEX_SEARCHER_CACHE, capacity, - LRUCacheType::SIZE, config::inverted_index_cache_stale_sweep_time_sec, - num_shards, element_count_capacity, cache_value_time_extractor, - cache_value_check_timestamp, true) {} + : LRUCachePolicy(CachePolicy::CacheType::INVERTEDINDEX_SEARCHER_CACHE, capacity, + LRUCacheType::SIZE, + config::inverted_index_cache_stale_sweep_time_sec, num_shards, + element_count_capacity, cache_value_time_extractor, + cache_value_check_timestamp, true) {} }; // Insert a cache entry by key. // And the cache entry will be returned in handle. 
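Editor's note: a hedged usage sketch of the new InvertedIndexAnalyzer facade declared above, mirroring the call sequence the reader-side hunk adopts further down (create_analyzer, create_reader, init, get_analyse_result). It assumes the code sits inside namespace doris::segment_v2 like the reader code, that the analyzer.h header above is on the include path, and that InvertedIndexCtx exposes char_filter_map as in the patched call sites; this is illustrative, not an exact call site.

// Tokenize a search string for a full-text query using the consolidated analyzer API.
std::vector<std::string> tokenize_for_query(const std::string& value,
                                            const std::string& field_name,
                                            InvertedIndexCtx* ctx,
                                            InvertedIndexQueryType query_type) {
    auto analyzer = inverted_index::InvertedIndexAnalyzer::create_analyzer(ctx);
    auto reader = inverted_index::InvertedIndexAnalyzer::create_reader(ctx->char_filter_map);
    reader->init(value.data(), value.size(), true); // same init call as the patched reader path
    return inverted_index::InvertedIndexAnalyzer::get_analyse_result(reader.get(), analyzer.get(),
                                                                     field_name, query_type);
}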
@@ -179,9 +179,9 @@ class InvertedIndexCacheHandle { class InvertedIndexQueryCacheHandle; -class InvertedIndexQueryCache : public LRUCachePolicyTrackingManual { +class InvertedIndexQueryCache : public LRUCachePolicy { public: - using LRUCachePolicyTrackingManual::insert; + using LRUCachePolicy::insert; // cache key struct CacheKey { @@ -227,10 +227,9 @@ class InvertedIndexQueryCache : public LRUCachePolicyTrackingManual { InvertedIndexQueryCache() = delete; InvertedIndexQueryCache(size_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::INVERTEDINDEX_QUERY_CACHE, - capacity, LRUCacheType::SIZE, - config::inverted_index_cache_stale_sweep_time_sec, - num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::INVERTEDINDEX_QUERY_CACHE, capacity, + LRUCacheType::SIZE, config::inverted_index_cache_stale_sweep_time_sec, + num_shards) {} bool lookup(const CacheKey& key, InvertedIndexQueryCacheHandle* handle); diff --git a/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp b/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp index 6cf9de5df1a72a..7b8504322d2687 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp +++ b/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp @@ -17,19 +17,13 @@ #include "olap/rowset/segment_v2/inverted_index_reader.h" -#include -#include -#include #include #include #include -#include #include #include -#include #include #include -#include #include #include #include @@ -40,26 +34,16 @@ #include #include -#include "gutil/integral_types.h" -#include "inverted_index_query_type.h" -#include "olap/rowset/segment_v2/inverted_index/query/phrase_query.h" - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wshadow-field" -#endif -#include "CLucene/analysis/standard95/StandardAnalyzer.h" -#ifdef __clang__ -#pragma clang diagnostic pop -#endif #include "common/config.h" #include "common/logging.h" #include "common/status.h" -#include "io/fs/file_system.h" +#include "gutil/integral_types.h" +#include "inverted_index_query_type.h" #include "olap/inverted_index_parser.h" #include "olap/key_coder.h" #include "olap/olap_common.h" -#include "olap/rowset/segment_v2/inverted_index/char_filter/char_filter_factory.h" +#include "olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h" +#include "olap/rowset/segment_v2/inverted_index/query/phrase_query.h" #include "olap/rowset/segment_v2/inverted_index/query/query_factory.h" #include "olap/rowset/segment_v2/inverted_index_cache.h" #include "olap/rowset/segment_v2/inverted_index_file_reader.h" @@ -114,83 +98,10 @@ CREATE_QUERY_VALUE_TEMPLATE(PrimitiveType::TYPE_STRING) CREATE_QUERY_VALUE_TEMPLATE(PrimitiveType::TYPE_IPV4) CREATE_QUERY_VALUE_TEMPLATE(PrimitiveType::TYPE_IPV6) -std::unique_ptr InvertedIndexReader::create_analyzer( - InvertedIndexCtx* inverted_index_ctx) { - std::unique_ptr analyzer; - auto analyser_type = inverted_index_ctx->parser_type; - if (analyser_type == InvertedIndexParserType::PARSER_STANDARD || - analyser_type == InvertedIndexParserType::PARSER_UNICODE) { - analyzer = std::make_unique(); - } else if (analyser_type == InvertedIndexParserType::PARSER_ENGLISH) { - analyzer = std::make_unique>(); - } else if (analyser_type == InvertedIndexParserType::PARSER_CHINESE) { - auto chinese_analyzer = - std::make_unique(L"chinese", false); - chinese_analyzer->initDict(config::inverted_index_dict_path); - auto mode = inverted_index_ctx->parser_mode; - if (mode == INVERTED_INDEX_PARSER_COARSE_GRANULARITY) { - 
chinese_analyzer->setMode(lucene::analysis::AnalyzerMode::Default); - } else { - chinese_analyzer->setMode(lucene::analysis::AnalyzerMode::All); - } - analyzer = std::move(chinese_analyzer); - } else { - // default - analyzer = std::make_unique>(); - } - return analyzer; -} - -std::unique_ptr InvertedIndexReader::create_reader( - InvertedIndexCtx* inverted_index_ctx, const std::string& value) { - std::unique_ptr reader = - std::make_unique>(); - CharFilterMap& char_filter_map = inverted_index_ctx->char_filter_map; - if (!char_filter_map.empty()) { - reader = std::unique_ptr(CharFilterFactory::create( - char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_TYPE], reader.release(), - char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_PATTERN], - char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_REPLACEMENT])); - } - reader->init(value.data(), value.size(), true); - return reader; -} - std::string InvertedIndexReader::get_index_file_path() { return _inverted_index_file_reader->get_index_file_path(&_index_meta); } -void InvertedIndexReader::get_analyse_result(std::vector& analyse_result, - lucene::util::Reader* reader, - lucene::analysis::Analyzer* analyzer, - const std::string& field_name, - InvertedIndexQueryType query_type, - bool drop_duplicates) { - analyse_result.clear(); - - std::wstring field_ws = StringUtil::string_to_wstring(field_name); - std::unique_ptr token_stream( - analyzer->tokenStream(field_ws.c_str(), reader)); - - lucene::analysis::Token token; - - while (token_stream->next(&token)) { - if (token.termLength() != 0) { - analyse_result.emplace_back(token.termBuffer(), token.termLength()); - } - } - - if (token_stream != nullptr) { - token_stream->close(); - } - - if (drop_duplicates && (query_type == InvertedIndexQueryType::MATCH_ANY_QUERY || - query_type == InvertedIndexQueryType::MATCH_ALL_QUERY)) { - std::set unrepeated_result(analyse_result.begin(), analyse_result.end()); - analyse_result.assign(unrepeated_result.begin(), unrepeated_result.end()); - } -} - Status InvertedIndexReader::read_null_bitmap(OlapReaderStatistics* stats, InvertedIndexQueryCacheHandle* cache_handle, lucene::store::Directory* dir) { @@ -362,14 +273,17 @@ Status FullTextIndexReader::query(OlapReaderStatistics* stats, RuntimeState* run get_inverted_index_parser_type_from_string( get_parser_string_from_properties(_index_meta.properties())), get_parser_mode_string_from_properties(_index_meta.properties()), - get_parser_char_filter_map_from_properties(_index_meta.properties())); - auto analyzer = create_analyzer(inverted_index_ctx.get()); - setup_analyzer_lowercase(analyzer, _index_meta.properties()); - setup_analyzer_use_stopwords(analyzer, _index_meta.properties()); + get_parser_char_filter_map_from_properties(_index_meta.properties()), + get_parser_lowercase_from_properties(_index_meta.properties()), + get_parser_stopwords_from_properties(_index_meta.properties())); + auto analyzer = inverted_index::InvertedIndexAnalyzer::create_analyzer( + inverted_index_ctx.get()); inverted_index_ctx->analyzer = analyzer.get(); - auto reader = create_reader(inverted_index_ctx.get(), search_str); - get_analyse_result(query_info.terms, reader.get(), analyzer.get(), column_name, - query_type); + auto reader = inverted_index::InvertedIndexAnalyzer::create_reader( + inverted_index_ctx->char_filter_map); + reader->init(search_str.data(), search_str.size(), true); + query_info.terms = inverted_index::InvertedIndexAnalyzer::get_analyse_result( + reader.get(), analyzer.get(), column_name, query_type); } if 
(query_info.terms.empty()) { auto msg = fmt::format( @@ -433,28 +347,6 @@ InvertedIndexReaderType FullTextIndexReader::type() { return InvertedIndexReaderType::FULLTEXT; } -void FullTextIndexReader::setup_analyzer_lowercase( - std::unique_ptr& analyzer, - const std::map& properties) { - auto lowercase = get_parser_lowercase_from_properties(properties); - if (lowercase == INVERTED_INDEX_PARSER_TRUE) { - analyzer->set_lowercase(true); - } else if (lowercase == INVERTED_INDEX_PARSER_FALSE) { - analyzer->set_lowercase(false); - } -} - -void FullTextIndexReader::setup_analyzer_use_stopwords( - std::unique_ptr& analyzer, - const std::map& properties) { - auto stop_words = get_parser_stopwords_from_properties(properties); - if (stop_words == "none") { - analyzer->set_stopwords(nullptr); - } else { - analyzer->set_stopwords(&lucene::analysis::standard95::stop_words); - } -} - Status StringTypeInvertedIndexReader::new_iterator( OlapReaderStatistics* stats, RuntimeState* runtime_state, std::unique_ptr* iterator) { diff --git a/be/src/olap/rowset/segment_v2/inverted_index_reader.h b/be/src/olap/rowset/segment_v2/inverted_index_reader.h index e8395903d7512d..9bd13309fa76e7 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_reader.h +++ b/be/src/olap/rowset/segment_v2/inverted_index_reader.h @@ -206,17 +206,6 @@ class InvertedIndexReader : public std::enable_shared_from_this& analyse_result, - lucene::util::Reader* reader, - lucene::analysis::Analyzer* analyzer, - const std::string& field_name, InvertedIndexQueryType query_type, - bool drop_duplicates = true); - - static std::unique_ptr create_reader(InvertedIndexCtx* inverted_index_ctx, - const std::string& value); - static std::unique_ptr create_analyzer( - InvertedIndexCtx* inverted_index_ctx); - virtual Status handle_query_cache(InvertedIndexQueryCache* cache, const InvertedIndexQueryCache::CacheKey& cache_key, InvertedIndexQueryCacheHandle* cache_handler, @@ -277,11 +266,6 @@ class FullTextIndexReader : public InvertedIndexReader { } InvertedIndexReaderType type() override; - - static void setup_analyzer_lowercase(std::unique_ptr& analyzer, - const std::map& properties); - static void setup_analyzer_use_stopwords(std::unique_ptr& analyzer, - const std::map& properties); }; class StringTypeInvertedIndexReader : public InvertedIndexReader { diff --git a/be/src/olap/rowset/segment_v2/inverted_index_writer.cpp b/be/src/olap/rowset/segment_v2/inverted_index_writer.cpp index 396c2bc11b6652..8729bd0c590276 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_writer.cpp +++ b/be/src/olap/rowset/segment_v2/inverted_index_writer.cpp @@ -49,6 +49,7 @@ #include "olap/key_coder.h" #include "olap/olap_common.h" #include "olap/rowset/segment_v2/common.h" +#include "olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h" #include "olap/rowset/segment_v2/inverted_index/char_filter/char_filter_factory.h" #include "olap/rowset/segment_v2/inverted_index_desc.h" #include "olap/rowset/segment_v2/inverted_index_file_writer.h" @@ -145,34 +146,14 @@ class InvertedIndexColumnWriterImpl : public InvertedIndexColumnWriter { return open_index_directory(); } - std::unique_ptr create_chinese_analyzer() { - auto chinese_analyzer = std::make_unique(); - chinese_analyzer->setLanguage(L"chinese"); - chinese_analyzer->initDict(config::inverted_index_dict_path); - - auto mode = get_parser_mode_string_from_properties(_index_meta->properties()); - if (mode == INVERTED_INDEX_PARSER_FINE_GRANULARITY) { - chinese_analyzer->setMode(lucene::analysis::AnalyzerMode::All); - } 
else { - chinese_analyzer->setMode(lucene::analysis::AnalyzerMode::Default); - } - - return chinese_analyzer; - } - - Status create_char_string_reader(std::unique_ptr& string_reader) { - CharFilterMap char_filter_map = - get_parser_char_filter_map_from_properties(_index_meta->properties()); - if (!char_filter_map.empty()) { - string_reader = std::unique_ptr(CharFilterFactory::create( - char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_TYPE], - new lucene::util::SStringReader(), - char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_PATTERN], - char_filter_map[INVERTED_INDEX_PARSER_CHAR_FILTER_REPLACEMENT])); - } else { - string_reader = std::make_unique>(); + Result> create_char_string_reader( + CharFilterMap& char_filter_map) { + try { + return inverted_index::InvertedIndexAnalyzer::create_reader(char_filter_map); + } catch (CLuceneError& e) { + return ResultError(Status::Error( + "inverted index create string reader failed: {}", e.what())); } - return Status::OK(); } Status open_index_directory() { @@ -180,10 +161,10 @@ class InvertedIndexColumnWriterImpl : public InvertedIndexColumnWriter { return Status::OK(); } - Status create_index_writer(std::unique_ptr& index_writer) { + std::unique_ptr create_index_writer() { bool create_index = true; bool close_dir_on_shutdown = true; - index_writer = std::make_unique( + auto index_writer = std::make_unique( _dir, _analyzer.get(), create_index, close_dir_on_shutdown); index_writer->setRAMBufferSizeMB(config::inverted_index_ram_buffer_size); index_writer->setMaxBufferedDocs(config::inverted_index_max_buffered_docs); @@ -191,7 +172,7 @@ class InvertedIndexColumnWriterImpl : public InvertedIndexColumnWriter { index_writer->setMergeFactor(MERGE_FACTOR); index_writer->setUseCompoundFile(false); - return Status::OK(); + return index_writer; } Status create_field(lucene::document::Field** field) { @@ -207,55 +188,29 @@ class InvertedIndexColumnWriterImpl : public InvertedIndexColumnWriter { return Status::OK(); } - Status create_analyzer(std::unique_ptr& analyzer) { + Result> create_analyzer( + std::shared_ptr& inverted_index_ctx) { try { - switch (_parser_type) { - case InvertedIndexParserType::PARSER_STANDARD: - case InvertedIndexParserType::PARSER_UNICODE: - analyzer = std::make_unique(); - break; - case InvertedIndexParserType::PARSER_ENGLISH: - analyzer = std::make_unique>(); - break; - case InvertedIndexParserType::PARSER_CHINESE: - analyzer = create_chinese_analyzer(); - break; - default: - analyzer = std::make_unique>(); - break; - } - setup_analyzer_lowercase(analyzer); - setup_analyzer_use_stopwords(analyzer); - return Status::OK(); + return inverted_index::InvertedIndexAnalyzer::create_analyzer(inverted_index_ctx.get()); } catch (CLuceneError& e) { - return Status::Error( - "inverted index create analyzer failed: {}", e.what()); - } - } - - void setup_analyzer_lowercase(std::unique_ptr& analyzer) { - auto lowercase = get_parser_lowercase_from_properties(_index_meta->properties()); - if (lowercase == INVERTED_INDEX_PARSER_TRUE) { - analyzer->set_lowercase(true); - } else if (lowercase == INVERTED_INDEX_PARSER_FALSE) { - analyzer->set_lowercase(false); - } - } - - void setup_analyzer_use_stopwords(std::unique_ptr& analyzer) { - auto stop_words = get_parser_stopwords_from_properties(_index_meta->properties()); - if (stop_words == "none") { - analyzer->set_stopwords(nullptr); - } else { - analyzer->set_stopwords(&lucene::analysis::standard95::stop_words); + return ResultError(Status::Error( + "inverted index create analyzer failed: {}", e.what())); 
} } Status init_fulltext_index() { + _inverted_index_ctx = std::make_shared( + get_inverted_index_parser_type_from_string( + get_parser_string_from_properties(_index_meta->properties())), + get_parser_mode_string_from_properties(_index_meta->properties()), + get_parser_char_filter_map_from_properties(_index_meta->properties()), + get_parser_lowercase_from_properties(_index_meta->properties()), + get_parser_stopwords_from_properties(_index_meta->properties())); RETURN_IF_ERROR(open_index_directory()); - RETURN_IF_ERROR(create_char_string_reader(_char_string_reader)); - RETURN_IF_ERROR(create_analyzer(_analyzer)); - RETURN_IF_ERROR(create_index_writer(_index_writer)); + _char_string_reader = + DORIS_TRY(create_char_string_reader(_inverted_index_ctx->char_filter_map)); + _analyzer = DORIS_TRY(create_analyzer(_inverted_index_ctx)); + _index_writer = create_index_writer(); _doc = std::make_unique(); if (_single_field) { RETURN_IF_ERROR(create_field(&_field)); @@ -414,8 +369,9 @@ class InvertedIndexColumnWriterImpl : public InvertedIndexColumnWriter { // stream can not reuse for different field bool own_token_stream = true; bool own_reader = true; - std::unique_ptr char_string_reader = nullptr; - RETURN_IF_ERROR(create_char_string_reader(char_string_reader)); + std::unique_ptr char_string_reader = + DORIS_TRY(create_char_string_reader( + _inverted_index_ctx->char_filter_map)); char_string_reader->init(v->get_data(), v->get_size(), false); _analyzer->set_ownReader(own_reader); ts = _analyzer->tokenStream(new_field->name(), @@ -663,6 +619,7 @@ class InvertedIndexColumnWriterImpl : public InvertedIndexColumnWriter { std::unique_ptr _analyzer = nullptr; std::unique_ptr _char_string_reader = nullptr; std::shared_ptr _bkd_writer = nullptr; + InvertedIndexCtxSPtr _inverted_index_ctx = nullptr; DorisFSDirectory* _dir = nullptr; const KeyCoder* _value_key_coder; const TabletIndex* _index_meta; diff --git a/be/src/olap/rowset/segment_v2/segment.cpp b/be/src/olap/rowset/segment_v2/segment.cpp index 0354cd4e3e7b5a..fe5a4c58ad232c 100644 --- a/be/src/olap/rowset/segment_v2/segment.cpp +++ b/be/src/olap/rowset/segment_v2/segment.cpp @@ -762,7 +762,7 @@ Status Segment::new_column_iterator(const TabletColumn& tablet_column, RETURN_IF_ERROR(_column_readers.at(tablet_column.unique_id())->new_iterator(&it)); iter->reset(it); - if (config::enable_column_type_check && + if (config::enable_column_type_check && !tablet_column.is_agg_state_type() && tablet_column.type() != _column_readers.at(tablet_column.unique_id())->get_meta_type()) { LOG(WARNING) << "different type between schema and column reader," << " column schema name: " << tablet_column.name() diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.cpp b/be/src/olap/rowset/segment_v2/segment_iterator.cpp index 91c6fe3a7048ec..2a7da619c7a3ab 100644 --- a/be/src/olap/rowset/segment_v2/segment_iterator.cpp +++ b/be/src/olap/rowset/segment_v2/segment_iterator.cpp @@ -915,9 +915,9 @@ bool SegmentIterator::_need_read_data(ColumnId cid) { // If any of the above conditions are met, log a debug message indicating that there's no need to read data for the indexed column. // Then, return false. 
int32_t unique_id = _opts.tablet_schema->column(cid).unique_id(); - if ((_need_read_data_indices.count(cid) > 0 && !_need_read_data_indices[cid] && - _output_columns.count(unique_id) < 1) || - (_need_read_data_indices.count(cid) > 0 && !_need_read_data_indices[cid] && + if ((_need_read_data_indices.contains(cid) && !_need_read_data_indices[cid] && + !_output_columns.contains(unique_id)) || + (_need_read_data_indices.contains(cid) && !_need_read_data_indices[cid] && _output_columns.count(unique_id) == 1 && _opts.push_down_agg_type_opt == TPushAggOp::COUNT_ON_INDEX)) { VLOG_DEBUG << "SegmentIterator no need read data for column: " @@ -1428,8 +1428,6 @@ Status SegmentIterator::_vec_init_lazy_materialization() { pred_id_set.insert(_short_cir_pred_column_ids.begin(), _short_cir_pred_column_ids.end()); pred_id_set.insert(_vec_pred_column_ids.begin(), _vec_pred_column_ids.end()); - std::set non_pred_set(_non_predicate_columns.begin(), - _non_predicate_columns.end()); DCHECK(_second_read_column_ids.empty()); // _second_read_column_ids must be empty. Otherwise _lazy_materialization_read must not false. diff --git a/be/src/olap/rowset/segment_v2/segment_writer.cpp b/be/src/olap/rowset/segment_v2/segment_writer.cpp index 84fa6c9e0041ad..225677f5d1f7d1 100644 --- a/be/src/olap/rowset/segment_v2/segment_writer.cpp +++ b/be/src/olap/rowset/segment_v2/segment_writer.cpp @@ -126,7 +126,7 @@ SegmentWriter::SegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, _key_index_size.clear(); _num_sort_key_columns = _tablet_schema->cluster_key_idxes().size(); for (auto cid : _tablet_schema->cluster_key_idxes()) { - const auto& column = _tablet_schema->column(cid); + const auto& column = _tablet_schema->column_by_uid(cid); _key_coders.push_back(get_key_coder(column.type())); _key_index_size.push_back(column.index_length()); } @@ -535,7 +535,7 @@ Status SegmentWriter::probe_key_for_mow( // 3. set columns to data convertor and then write all columns Status SegmentWriter::append_block_with_partial_content(const vectorized::Block* block, size_t row_pos, size_t num_rows) { - if (block->columns() <= _tablet_schema->num_key_columns() || + if (block->columns() < _tablet_schema->num_key_columns() || block->columns() >= _tablet_schema->num_columns()) { return Status::InvalidArgument( fmt::format("illegal partial update block columns: {}, num key columns: {}, total " @@ -755,17 +755,31 @@ Status SegmentWriter::append_block(const vectorized::Block* block, size_t row_po // 2. 
generate short key index (use cluster key) key_columns.clear(); for (const auto& cid : _tablet_schema->cluster_key_idxes()) { - for (size_t id = 0; id < _column_writers.size(); ++id) { - // olap data convertor always start from id = 0 - if (cid == _column_ids[id]) { - auto converted_result = _olap_data_convertor->convert_column_data(id); + // find cluster key index in tablet schema + auto cluster_key_index = _tablet_schema->field_index(cid); + if (cluster_key_index == -1) { + return Status::InternalError( + "could not find cluster key column with unique_id=" + + std::to_string(cid) + " in tablet schema"); + } + bool found = false; + for (auto i = 0; i < _column_ids.size(); ++i) { + if (_column_ids[i] == cluster_key_index) { + auto converted_result = _olap_data_convertor->convert_column_data(i); if (!converted_result.first.ok()) { return converted_result.first; } key_columns.push_back(converted_result.second); + found = true; break; } } + if (!found) { + return Status::InternalError( + "could not found cluster key column with unique_id=" + + std::to_string(cid) + + ", tablet schema index=" + std::to_string(cluster_key_index)); + } } RETURN_IF_ERROR(_generate_short_key_index(key_columns, num_rows, short_key_pos)); } else if (_is_mow()) { diff --git a/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp b/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp index cae16d98b7c224..4863f2c0401ae8 100644 --- a/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp +++ b/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp @@ -130,7 +130,7 @@ VerticalSegmentWriter::VerticalSegmentWriter(io::FileWriter* file_writer, uint32 _key_index_size.clear(); _num_sort_key_columns = _tablet_schema->cluster_key_idxes().size(); for (auto cid : _tablet_schema->cluster_key_idxes()) { - const auto& column = _tablet_schema->column(cid); + const auto& column = _tablet_schema->column_by_uid(cid); _key_coders.push_back(get_key_coder(column.type())); _key_index_size.push_back(column.index_length()); } @@ -555,7 +555,7 @@ Status VerticalSegmentWriter::batch_block(const vectorized::Block* block, size_t _opts.rowset_ctx->partial_update_info->is_partial_update && _opts.write_type == DataWriteType::TYPE_DIRECT && !_opts.rowset_ctx->is_transient_rowset_writer) { - if (block->columns() <= _tablet_schema->num_key_columns() || + if (block->columns() < _tablet_schema->num_key_columns() || block->columns() >= _tablet_schema->num_columns()) { return Status::InvalidArgument(fmt::format( "illegal partial update block columns: {}, num key columns: {}, total " @@ -714,6 +714,7 @@ Status VerticalSegmentWriter::write_batch() { std::vector key_columns; vectorized::IOlapColumnDataAccessor* seq_column = nullptr; + // the key is cluster key column unique id std::map cid_to_column; for (uint32_t cid = 0; cid < _tablet_schema->num_columns(); ++cid) { RETURN_IF_ERROR(_create_column_writer(cid, _tablet_schema->column(cid), _tablet_schema)); @@ -732,11 +733,12 @@ Status VerticalSegmentWriter::write_batch() { if (_tablet_schema->has_sequence_col() && cid == _tablet_schema->sequence_col_idx()) { seq_column = column; } + auto column_unique_id = _tablet_schema->column(cid).unique_id(); if (_is_mow_with_cluster_key() && std::find(_tablet_schema->cluster_key_idxes().begin(), _tablet_schema->cluster_key_idxes().end(), - cid) != _tablet_schema->cluster_key_idxes().end()) { - cid_to_column[cid] = column; + column_unique_id) != _tablet_schema->cluster_key_idxes().end()) { + cid_to_column[column_unique_id] = column; } 
RETURN_IF_ERROR(_column_writers[cid]->append(column->get_nullmap(), column->get_data(), data.num_rows)); diff --git a/be/src/olap/rowset_builder.cpp b/be/src/olap/rowset_builder.cpp index 39fcc3f6c231ab..9dac1f8842d345 100644 --- a/be/src/olap/rowset_builder.cpp +++ b/be/src/olap/rowset_builder.cpp @@ -418,12 +418,12 @@ void BaseRowsetBuilder::_build_current_tablet_schema(int64_t index_id, } // set partial update columns info _partial_update_info = std::make_shared(); - _partial_update_info->init(*_tablet_schema, table_schema_param->is_partial_update(), - table_schema_param->partial_update_input_columns(), - table_schema_param->is_strict_mode(), - table_schema_param->timestamp_ms(), table_schema_param->timezone(), - table_schema_param->auto_increment_coulumn(), - _max_version_in_flush_phase); + _partial_update_info->init( + *_tablet_schema, table_schema_param->is_partial_update(), + table_schema_param->partial_update_input_columns(), + table_schema_param->is_strict_mode(), table_schema_param->timestamp_ms(), + table_schema_param->nano_seconds(), table_schema_param->timezone(), + table_schema_param->auto_increment_coulumn(), _max_version_in_flush_phase); } } // namespace doris diff --git a/be/src/olap/schema_cache.h b/be/src/olap/schema_cache.h index 7bb18a59c349a0..68cd809ed226f4 100644 --- a/be/src/olap/schema_cache.h +++ b/be/src/olap/schema_cache.h @@ -44,7 +44,7 @@ using SegmentIteratorUPtr = std::unique_ptr; // eliminating the need for frequent allocation and deallocation during usage. // This caching mechanism proves immensely advantageous, particularly in scenarios // with high concurrency, where queries are executed simultaneously. -class SchemaCache : public LRUCachePolicyTrackingManual { +class SchemaCache : public LRUCachePolicy { public: static SchemaCache* instance(); @@ -86,9 +86,8 @@ class SchemaCache : public LRUCachePolicyTrackingManual { }; SchemaCache(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::SCHEMA_CACHE, capacity, - LRUCacheType::NUMBER, - config::schema_cache_sweep_time_sec) {} + : LRUCachePolicy(CachePolicy::CacheType::SCHEMA_CACHE, capacity, LRUCacheType::NUMBER, + config::schema_cache_sweep_time_sec) {} private: static constexpr char SCHEMA_DELIMITER = '-'; diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp index 1d7f18bee8b87d..f6cfdf3dbde8d2 100644 --- a/be/src/olap/schema_change.cpp +++ b/be/src/olap/schema_change.cpp @@ -316,9 +316,9 @@ Status BlockChanger::change_block(vectorized::Block* ref_block, if (result_tmp_column_def.column->size() != row_num) { return Status::Error( - "result size invalid, expect={}, real={}; input expr={}", row_num, + "result size invalid, expect={}, real={}; input expr={}, block={}", row_num, result_tmp_column_def.column->size(), - apache::thrift::ThriftDebugString(*expr)); + apache::thrift::ThriftDebugString(*expr), ref_block->dump_structure()); } if (_type == SCHEMA_CHANGE) { diff --git a/be/src/olap/segment_loader.cpp b/be/src/olap/segment_loader.cpp index 12ab89af0be283..fd7e3f476ad082 100644 --- a/be/src/olap/segment_loader.cpp +++ b/be/src/olap/segment_loader.cpp @@ -40,9 +40,9 @@ bool SegmentCache::lookup(const SegmentCache::CacheKey& key, SegmentCacheHandle* void SegmentCache::insert(const SegmentCache::CacheKey& key, SegmentCache::CacheValue& value, SegmentCacheHandle* handle) { - auto* lru_handle = LRUCachePolicyTrackingManual::insert( - key.encode(), &value, value.segment->meta_mem_usage(), value.segment->meta_mem_usage(), - CachePriority::NORMAL); + auto* lru_handle 
= + LRUCachePolicy::insert(key.encode(), &value, value.segment->meta_mem_usage(), + value.segment->meta_mem_usage(), CachePriority::NORMAL); handle->push_segment(this, lru_handle); } diff --git a/be/src/olap/segment_loader.h b/be/src/olap/segment_loader.h index 5bb8fae3c41877..d177024242db33 100644 --- a/be/src/olap/segment_loader.h +++ b/be/src/olap/segment_loader.h @@ -55,9 +55,9 @@ class BetaRowset; // Make sure that cache_handle is valid during the segment usage period. using BetaRowsetSharedPtr = std::shared_ptr; -class SegmentCache : public LRUCachePolicyTrackingManual { +class SegmentCache : public LRUCachePolicy { public: - using LRUCachePolicyTrackingManual::insert; + using LRUCachePolicy::insert; // The cache key or segment lru cache struct CacheKey { CacheKey(RowsetId rowset_id_, int64_t segment_id_) @@ -81,10 +81,9 @@ class SegmentCache : public LRUCachePolicyTrackingManual { }; SegmentCache(size_t memory_bytes_limit, size_t segment_num_limit) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::SEGMENT_CACHE, - memory_bytes_limit, LRUCacheType::SIZE, - config::tablet_rowset_stale_sweep_time_sec, - DEFAULT_LRU_CACHE_NUM_SHARDS * 2, segment_num_limit) {} + : LRUCachePolicy(CachePolicy::CacheType::SEGMENT_CACHE, memory_bytes_limit, + LRUCacheType::SIZE, config::tablet_rowset_stale_sweep_time_sec, + DEFAULT_LRU_CACHE_NUM_SHARDS * 2, segment_num_limit) {} // Lookup the given segment in the cache. // If the segment is found, the cache entry will be written into handle. diff --git a/be/src/olap/snapshot_manager.cpp b/be/src/olap/snapshot_manager.cpp index c7790f6f354ec2..e0bfaf704dd7e7 100644 --- a/be/src/olap/snapshot_manager.cpp +++ b/be/src/olap/snapshot_manager.cpp @@ -93,10 +93,15 @@ Status SnapshotManager::make_snapshot(const TSnapshotRequest& request, string* s TabletSharedPtr ref_tablet = target_tablet; if (request.__isset.ref_tablet_id) { int64_t ref_tablet_id = request.ref_tablet_id; - ref_tablet = _engine.tablet_manager()->get_tablet(ref_tablet_id); - if (ref_tablet == nullptr) { - return Status::Error("failed to get ref tablet. tablet={}", - ref_tablet_id); + TabletSharedPtr base_tablet = _engine.tablet_manager()->get_tablet(ref_tablet_id); + + // Some tasks, like medium migration, cause the target tablet and base tablet to stay on + // different disks. In this case, we fall through to the normal restore path. + // + // Otherwise, we can directly link the rowset files from the base tablet to the target tablet. 
+ if (base_tablet != nullptr && + base_tablet->data_dir()->path() == target_tablet->data_dir()->path()) { + ref_tablet = std::move(base_tablet); } } diff --git a/be/src/olap/storage_engine.h b/be/src/olap/storage_engine.h index d7ccd4597d6ef3..b2a313adcdbb7e 100644 --- a/be/src/olap/storage_engine.h +++ b/be/src/olap/storage_engine.h @@ -540,7 +540,7 @@ class StorageEngine final : public BaseStorageEngine { // lru cache for create tabelt round robin in disks // key: partitionId_medium // value: index -class CreateTabletIdxCache : public LRUCachePolicyTrackingManual { +class CreateTabletIdxCache : public LRUCachePolicy { public: // get key, delimiter with DELIMITER '-' static std::string get_key(int64_t partition_id, TStorageMedium::type medium) { @@ -558,9 +558,9 @@ class CreateTabletIdxCache : public LRUCachePolicyTrackingManual { }; CreateTabletIdxCache(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::CREATE_TABLET_RR_IDX_CACHE, - capacity, LRUCacheType::NUMBER, - /*stale_sweep_time_s*/ 30 * 60) {} + : LRUCachePolicy(CachePolicy::CacheType::CREATE_TABLET_RR_IDX_CACHE, capacity, + LRUCacheType::NUMBER, + /*stale_sweep_time_s*/ 30 * 60) {} }; struct DirInfo { diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp index 66278afdb666ee..51eabe5495ef89 100644 --- a/be/src/olap/tablet.cpp +++ b/be/src/olap/tablet.cpp @@ -57,6 +57,7 @@ #include "agent/utils.h" #include "common/config.h" #include "common/consts.h" +#include "common/exception.h" #include "common/logging.h" #include "common/signal_handler.h" #include "common/status.h" @@ -855,8 +856,8 @@ Status Tablet::capture_consistent_versions_unlocked(const Version& spec_version, << ", version already has been merged. spec_version: " << spec_version << ", max_version: " << max_version_unlocked(); } - status = Status::Error( - "missed_versions is empty, spec_version " + status = Status::Error( + "versions are already compacted, spec_version " "{}, max_version {}, tablet_id {}", spec_version.second, max_version_unlocked(), tablet_id()); } else { @@ -1788,8 +1789,7 @@ void Tablet::execute_compaction(CompactionMixin& compaction) { MonotonicStopWatch watch; watch.start(); - Status res = compaction.execute_compact(); - + Status res = [&]() { RETURN_IF_CATCH_EXCEPTION({ return compaction.execute_compact(); }); }(); if (!res.ok()) [[unlikely]] { set_last_failure_time(this, compaction, UnixMillis()); LOG(WARNING) << "failed to do " << compaction.compaction_name() @@ -2512,6 +2512,11 @@ Status Tablet::get_rowset_binlog_metas(const std::vector& binlog_versio binlog_versions, metas_pb); } +Status Tablet::get_rowset_binlog_metas(Version binlog_versions, RowsetBinlogMetasPB* metas_pb) { + return RowsetMetaManager::get_rowset_binlog_metas(_data_dir->get_meta(), tablet_uid(), + binlog_versions, metas_pb); +} + std::string Tablet::get_segment_filepath(std::string_view rowset_id, std::string_view segment_index) const { return fmt::format("{}/_binlog/{}_{}.dat", _tablet_path, rowset_id, segment_index); diff --git a/be/src/olap/tablet.h b/be/src/olap/tablet.h index 800c720a1c4431..33253e82ced2b5 100644 --- a/be/src/olap/tablet.h +++ b/be/src/olap/tablet.h @@ -436,6 +436,7 @@ class Tablet final : public BaseTablet { std::string_view rowset_id) const; Status get_rowset_binlog_metas(const std::vector& binlog_versions, RowsetBinlogMetasPB* metas_pb); + Status get_rowset_binlog_metas(Version binlog_versions, RowsetBinlogMetasPB* metas_pb); std::string get_segment_filepath(std::string_view rowset_id, std::string_view segment_index) const; 
std::string get_segment_filepath(std::string_view rowset_id, int64_t segment_index) const; diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp index bc883185465629..468a6b2fb126f0 100644 --- a/be/src/olap/tablet_manager.cpp +++ b/be/src/olap/tablet_manager.cpp @@ -90,8 +90,7 @@ bvar::Adder g_tablet_meta_schema_columns_count("tablet_meta_schema_colu TabletManager::TabletManager(StorageEngine& engine, int32_t tablet_map_lock_shard_size) : _engine(engine), - _tablet_meta_mem_tracker(std::make_shared( - "TabletMeta(experimental)", ExecEnv::GetInstance()->details_mem_tracker_set())), + _tablet_meta_mem_tracker(std::make_shared("TabletMeta(experimental)")), _tablets_shards_size(tablet_map_lock_shard_size), _tablets_shards_mask(tablet_map_lock_shard_size - 1) { CHECK_GT(_tablets_shards_size, 0); @@ -279,6 +278,7 @@ Status TabletManager::create_tablet(const TCreateTabletReq& request, std::vector // we need use write lock on shard-1 and then use read lock on shard-2 // if there have create rollup tablet C(assume on shard-2) from tablet D(assume on shard-1) at the same time, we will meet deadlock std::unique_lock two_tablet_lock(_two_tablet_mtx, std::defer_lock); + bool in_restore_mode = request.__isset.in_restore_mode && request.in_restore_mode; bool is_schema_change_or_atomic_restore = request.__isset.base_tablet_id && request.base_tablet_id > 0; bool need_two_lock = @@ -325,14 +325,20 @@ Status TabletManager::create_tablet(const TCreateTabletReq& request, std::vector if (base_tablet == nullptr) { DorisMetrics::instance()->create_tablet_requests_failed->increment(1); return Status::Error( - "fail to create tablet(change schema), base tablet does not exist. " - "new_tablet_id={}, base_tablet_id={}", + "fail to create tablet(change schema/atomic restore), base tablet does not " + "exist. new_tablet_id={}, base_tablet_id={}", tablet_id, request.base_tablet_id); } // If we are doing schema-change or atomic-restore, we should use the same data dir // TODO(lingbin): A litter trick here, the directory should be determined before // entering this method - if (request.storage_medium == base_tablet->data_dir()->storage_medium()) { + // + // ATTN: Since all restored replicas will be saved to HDD, so no storage_medium check here. + if (in_restore_mode || + request.storage_medium == base_tablet->data_dir()->storage_medium()) { + LOG(INFO) << "create tablet use the base tablet data dir. tablet_id=" << tablet_id + << ", base tablet_id=" << request.base_tablet_id + << ", data dir=" << base_tablet->data_dir()->path(); stores.clear(); stores.push_back(base_tablet->data_dir()); } @@ -966,6 +972,7 @@ Status TabletManager::load_tablet_from_dir(DataDir* store, TTabletId tablet_id, if (binlog_meta_filesize > 0) { contain_binlog = true; RETURN_IF_ERROR(read_pb(binlog_metas_file, &rowset_binlog_metas_pb)); + VLOG_DEBUG << "load rowset binlog metas from file. 
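// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): the
// create_tablet hunk keeps a new tablet on the same data dir as its base
// tablet for schema change and atomic restore, and skips the storage-medium
// check when the request is in restore mode. A rough model of that selection
// with placeholder types (DataDir, StorageMedium and the field names below
// are simplifications, not the real Doris classes):

#include <iostream>
#include <string>
#include <vector>

enum class StorageMedium { HDD, SSD };

struct DataDir {                      // placeholder for doris::DataDir
    std::string path;
    StorageMedium medium;
};

// Reuse the base tablet's data dir when restoring or when the requested
// medium matches; otherwise keep the original candidate list.
std::vector<DataDir*> pick_stores(std::vector<DataDir*> candidates, DataDir* base_dir,
                                  bool in_restore_mode, StorageMedium requested_medium) {
    if (base_dir != nullptr && (in_restore_mode || requested_medium == base_dir->medium)) {
        return {base_dir};
    }
    return candidates;
}

int main() {
    DataDir hdd{"/data1", StorageMedium::HDD};
    DataDir ssd{"/data2", StorageMedium::SSD};
    std::vector<DataDir*> all{&hdd, &ssd};
    // Restore mode: the medium mismatch is ignored and the base dir wins.
    auto stores = pick_stores(all, &hdd, /*in_restore_mode=*/true, StorageMedium::SSD);
    std::cout << "chosen dir: " << stores.front()->path << '\n';
    return 0;
}
// ---------------------------------------------------------------------------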
file_path=" << binlog_metas_file; } RETURN_IF_ERROR(io::global_local_filesystem()->delete_file(binlog_metas_file)); } diff --git a/be/src/olap/tablet_meta.cpp b/be/src/olap/tablet_meta.cpp index fbb924089b1eb6..a0d6213860cb2b 100644 --- a/be/src/olap/tablet_meta.cpp +++ b/be/src/olap/tablet_meta.cpp @@ -30,6 +30,8 @@ #include #include +#include "cloud/cloud_meta_mgr.h" +#include "cloud/cloud_storage_engine.h" #include "cloud/config.h" #include "common/config.h" #include "gutil/integral_types.h" @@ -1082,6 +1084,7 @@ bool DeleteBitmap::empty() const { } uint64_t DeleteBitmap::cardinality() const { + std::shared_lock l(lock); uint64_t res = 0; for (auto entry : delete_bitmap) { res += entry.second.cardinality(); @@ -1089,6 +1092,15 @@ uint64_t DeleteBitmap::cardinality() const { return res; } +size_t DeleteBitmap::get_size() const { + std::shared_lock l(lock); + size_t charge = 0; + for (auto& [k, v] : delete_bitmap) { + charge += v.getSizeInBytes(); + } + return charge; +} + bool DeleteBitmap::contains_agg_without_cache(const BitmapKey& bmk, uint32_t row_id) const { std::shared_lock l(lock); DeleteBitmap::BitmapKey start {std::get<0>(bmk), std::get<1>(bmk), 0}; @@ -1166,6 +1178,52 @@ void DeleteBitmap::merge(const DeleteBitmap& other) { } } +void DeleteBitmap::add_to_remove_queue( + const std::string& version_str, + const std::vector>& + vector) { + std::shared_lock l(stale_delete_bitmap_lock); + _stale_delete_bitmap.emplace(version_str, vector); +} + +void DeleteBitmap::remove_stale_delete_bitmap_from_queue(const std::vector& vector) { + std::shared_lock l(stale_delete_bitmap_lock); + // + std::vector> to_delete; + auto tablet_id = -1; + for (auto& version_str : vector) { + auto it = _stale_delete_bitmap.find(version_str); + if (it != _stale_delete_bitmap.end()) { + auto delete_bitmap_vector = it->second; + for (auto& delete_bitmap_tuple : it->second) { + if (tablet_id < 0) { + tablet_id = std::get<0>(delete_bitmap_tuple); + } + auto start_bmk = std::get<1>(delete_bitmap_tuple); + auto end_bmk = std::get<2>(delete_bitmap_tuple); + remove(start_bmk, end_bmk); + to_delete.emplace_back(std::make_tuple(std::get<0>(start_bmk).to_string(), 0, + std::get<2>(end_bmk))); + } + _stale_delete_bitmap.erase(version_str); + } + } + if (tablet_id == -1 || to_delete.empty()) { + return; + } + CloudStorageEngine& engine = ExecEnv::GetInstance()->storage_engine().to_cloud(); + auto st = engine.meta_mgr().remove_old_version_delete_bitmap(tablet_id, to_delete); + if (!st.ok()) { + LOG(WARNING) << "fail to remove_stale_delete_bitmap_from_queue for tablet=" << tablet_id + << ",st=" << st; + } +} + +uint64_t DeleteBitmap::get_delete_bitmap_count() { + std::shared_lock l(lock); + return delete_bitmap.size(); +} + // We cannot just copy the underlying memory to construct a string // due to equivalent objects may have different padding bytes. 
// Reading padding bytes is undefined behavior, neither copy nor diff --git a/be/src/olap/tablet_meta.h b/be/src/olap/tablet_meta.h index 74ab71d0586fa0..9017e8baf32d0b 100644 --- a/be/src/olap/tablet_meta.h +++ b/be/src/olap/tablet_meta.h @@ -371,6 +371,7 @@ class TabletMeta { class DeleteBitmap { public: mutable std::shared_mutex lock; + mutable std::shared_mutex stale_delete_bitmap_lock; using SegmentId = uint32_t; using Version = uint64_t; using BitmapKey = std::tuple; @@ -450,6 +451,12 @@ class DeleteBitmap { */ uint64_t cardinality() const; + /** + * return the total size of the Delete Bitmap(after serialized) + */ + + size_t get_size() const; + /** * Sets the bitmap of specific segment, it's may be insertion or replacement * @@ -520,13 +527,19 @@ class DeleteBitmap { void remove_sentinel_marks(); - class AggCachePolicy : public LRUCachePolicyTrackingManual { + void add_to_remove_queue(const std::string& version_str, + const std::vector>& vector); + void remove_stale_delete_bitmap_from_queue(const std::vector& vector); + + uint64_t get_delete_bitmap_count(); + + class AggCachePolicy : public LRUCachePolicy { public: AggCachePolicy(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::DELETE_BITMAP_AGG_CACHE, - capacity, LRUCacheType::SIZE, - config::delete_bitmap_agg_cache_stale_sweep_time_sec, - 256) {} + : LRUCachePolicy(CachePolicy::CacheType::DELETE_BITMAP_AGG_CACHE, capacity, + LRUCacheType::SIZE, + config::delete_bitmap_agg_cache_stale_sweep_time_sec, 256) {} }; class AggCache { @@ -554,6 +567,10 @@ class DeleteBitmap { private: mutable std::shared_ptr _agg_cache; int64_t _tablet_id; + // > + std::map>> + _stale_delete_bitmap; }; static const std::string SEQUENCE_COL = "__DORIS_SEQUENCE_COL__"; diff --git a/be/src/olap/tablet_schema.cpp b/be/src/olap/tablet_schema.cpp index 813a5e5519f35a..83b2bd4f702571 100644 --- a/be/src/olap/tablet_schema.cpp +++ b/be/src/olap/tablet_schema.cpp @@ -675,7 +675,7 @@ bool TabletColumn::is_row_store_column() const { vectorized::AggregateFunctionPtr TabletColumn::get_aggregate_function_union( vectorized::DataTypePtr type, int current_be_exec_version) const { const auto* state_type = assert_cast(type.get()); - BeExecVersionManager::check_agg_state_compatibility( + BeExecVersionManager::check_function_compatibility( current_be_exec_version, _be_exec_version, state_type->get_nested_function()->get_name()); return vectorized::AggregateStateUnion::create(state_type->get_nested_function(), {type}, type); diff --git a/be/src/olap/tablet_schema.h b/be/src/olap/tablet_schema.h index 1d1d6c9de79d24..b7fe0e9310183d 100644 --- a/be/src/olap/tablet_schema.h +++ b/be/src/olap/tablet_schema.h @@ -90,6 +90,7 @@ class TabletColumn { bool is_bf_column() const { return _is_bf_column; } bool has_bitmap_index() const { return _has_bitmap_index; } bool is_array_type() const { return _type == FieldType::OLAP_FIELD_TYPE_ARRAY; } + bool is_agg_state_type() const { return _type == FieldType::OLAP_FIELD_TYPE_AGG_STATE; } bool is_jsonb_type() const { return _type == FieldType::OLAP_FIELD_TYPE_JSONB; } bool is_length_variable_type() const { return _type == FieldType::OLAP_FIELD_TYPE_CHAR || diff --git a/be/src/olap/tablet_schema_cache.cpp b/be/src/olap/tablet_schema_cache.cpp index 51618f590a7dd2..e339c947bb97a4 100644 --- a/be/src/olap/tablet_schema_cache.cpp +++ b/be/src/olap/tablet_schema_cache.cpp @@ -40,8 +40,8 @@ std::pair TabletSchemaCache::insert(const std: pb.ParseFromString(key); tablet_schema_ptr->init_from_pb(pb); value->tablet_schema = 
tablet_schema_ptr; - lru_handle = LRUCachePolicyTrackingManual::insert( - key, value, tablet_schema_ptr->num_columns(), 0, CachePriority::NORMAL); + lru_handle = LRUCachePolicy::insert(key, value, tablet_schema_ptr->num_columns(), 0, + CachePriority::NORMAL); g_tablet_schema_cache_count << 1; g_tablet_schema_cache_columns_count << tablet_schema_ptr->num_columns(); } diff --git a/be/src/olap/tablet_schema_cache.h b/be/src/olap/tablet_schema_cache.h index 10462804ed2012..e18892a3ca5f06 100644 --- a/be/src/olap/tablet_schema_cache.h +++ b/be/src/olap/tablet_schema_cache.h @@ -23,14 +23,13 @@ namespace doris { -class TabletSchemaCache : public LRUCachePolicyTrackingManual { +class TabletSchemaCache : public LRUCachePolicy { public: - using LRUCachePolicyTrackingManual::insert; + using LRUCachePolicy::insert; TabletSchemaCache(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::TABLET_SCHEMA_CACHE, capacity, - LRUCacheType::NUMBER, - config::tablet_schema_cache_recycle_interval) {} + : LRUCachePolicy(CachePolicy::CacheType::TABLET_SCHEMA_CACHE, capacity, + LRUCacheType::NUMBER, config::tablet_schema_cache_recycle_interval) {} static TabletSchemaCache* create_global_schema_cache(size_t capacity) { auto* res = new TabletSchemaCache(capacity); diff --git a/be/src/olap/task/engine_publish_version_task.cpp b/be/src/olap/task/engine_publish_version_task.cpp index 09238f570b7567..dae4c6be814d5a 100644 --- a/be/src/olap/task/engine_publish_version_task.cpp +++ b/be/src/olap/task/engine_publish_version_task.cpp @@ -230,16 +230,22 @@ Status EnginePublishVersionTask::execute() { int64_t missed_version = max_version + 1; int64_t missed_txn_id = _engine.txn_manager()->get_txn_by_tablet_version( tablet->tablet_id(), missed_version); - auto msg = fmt::format( - "uniq key with merge-on-write version not continuous, " - "missed version={}, it's transaction_id={}, current publish " - "version={}, tablet_id={}, transaction_id={}", - missed_version, missed_txn_id, version.second, tablet->tablet_id(), - _publish_version_req.transaction_id); - if (first_time_update) { - LOG(INFO) << msg; - } else { - LOG_EVERY_SECOND(INFO) << msg; + bool need_log = + (config::publish_version_gap_logging_threshold < 0 || + max_version + config::publish_version_gap_logging_threshold >= + version.second); + if (need_log) { + auto msg = fmt::format( + "uniq key with merge-on-write version not continuous, " + "missed version={}, it's transaction_id={}, current publish " + "version={}, tablet_id={}, transaction_id={}", + missed_version, missed_txn_id, version.second, + tablet->tablet_id(), _publish_version_req.transaction_id); + if (first_time_update) { + LOG(INFO) << msg; + } else { + LOG_EVERY_SECOND(INFO) << msg; + } } }; // The versions during the schema change period need to be also continuous diff --git a/be/src/olap/task/engine_storage_migration_task.cpp b/be/src/olap/task/engine_storage_migration_task.cpp index 7c870a5e8ea5fc..21be34a334dd8d 100644 --- a/be/src/olap/task/engine_storage_migration_task.cpp +++ b/be/src/olap/task/engine_storage_migration_task.cpp @@ -37,6 +37,7 @@ #include "olap/data_dir.h" #include "olap/olap_common.h" #include "olap/olap_define.h" +#include "olap/pb_helper.h" #include "olap/rowset/rowset_meta.h" #include "olap/snapshot_manager.h" #include "olap/storage_engine.h" @@ -262,9 +263,11 @@ Status EngineStorageMigrationTask::_migrate() { } std::vector temp_consistent_rowsets(consistent_rowsets); + RowsetBinlogMetasPB rowset_binlog_metas_pb; do { // migrate all index and data files but 
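// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): the
// engine_publish_version_task hunk only logs the "version not continuous"
// message while the gap between the tablet's max version and the version
// being published stays within config::publish_version_gap_logging_threshold
// (a negative threshold means "always log"). The gate is a one-line
// predicate; here it is with the config value passed in as a parameter:

#include <cstdint>
#include <iostream>

bool need_log_publish_gap(int64_t max_version, int64_t publish_version, int32_t threshold) {
    // threshold < 0: always log; otherwise log only for small gaps.
    return threshold < 0 || max_version + threshold >= publish_version;
}

int main() {
    std::cout << std::boolalpha
              << need_log_publish_gap(100, 103, 5) << ' '    // small gap -> log
              << need_log_publish_gap(100, 200, 5) << ' '    // large gap -> suppress
              << need_log_publish_gap(100, 200, -1) << '\n'; // threshold disabled -> log
    return 0;
}
// ---------------------------------------------------------------------------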
header file - res = _copy_index_and_data_files(full_path, temp_consistent_rowsets); + res = _copy_index_and_data_files(full_path, temp_consistent_rowsets, + &rowset_binlog_metas_pb); if (!res.ok()) { break; } @@ -292,7 +295,8 @@ Status EngineStorageMigrationTask::_migrate() { // we take the lock to complete it to avoid long-term competition with other tasks if (_is_rowsets_size_less_than_threshold(temp_consistent_rowsets)) { // force to copy the remaining data and index - res = _copy_index_and_data_files(full_path, temp_consistent_rowsets); + res = _copy_index_and_data_files(full_path, temp_consistent_rowsets, + &rowset_binlog_metas_pb); if (!res.ok()) { break; } @@ -307,6 +311,16 @@ Status EngineStorageMigrationTask::_migrate() { } } + // save rowset binlog metas + if (rowset_binlog_metas_pb.rowset_binlog_metas_size() > 0) { + auto rowset_binlog_metas_pb_filename = + fmt::format("{}/rowset_binlog_metas.pb", full_path); + res = write_pb(rowset_binlog_metas_pb_filename, rowset_binlog_metas_pb); + if (!res.ok()) { + break; + } + } + // generate new tablet meta and write to hdr file res = _gen_and_write_header_to_hdr_file(shard, full_path, consistent_rowsets, end_version); if (!res.ok()) { @@ -350,10 +364,92 @@ void EngineStorageMigrationTask::_generate_new_header( } Status EngineStorageMigrationTask::_copy_index_and_data_files( - const string& full_path, const std::vector& consistent_rowsets) const { + const string& full_path, const std::vector& consistent_rowsets, + RowsetBinlogMetasPB* all_binlog_metas_pb) const { + RowsetBinlogMetasPB rowset_binlog_metas_pb; for (const auto& rs : consistent_rowsets) { RETURN_IF_ERROR(rs->copy_files_to(full_path, rs->rowset_id())); + + Version binlog_versions = rs->version(); + RETURN_IF_ERROR(_tablet->get_rowset_binlog_metas(binlog_versions, &rowset_binlog_metas_pb)); + } + + // copy index binlog files. + for (const auto& rowset_binlog_meta : rowset_binlog_metas_pb.rowset_binlog_metas()) { + auto num_segments = rowset_binlog_meta.num_segments(); + std::string_view rowset_id = rowset_binlog_meta.rowset_id(); + + RowsetMetaPB rowset_meta_pb; + if (!rowset_meta_pb.ParseFromString(rowset_binlog_meta.data())) { + auto err_msg = fmt::format("fail to parse binlog meta data value:{}", + rowset_binlog_meta.data()); + LOG(WARNING) << err_msg; + return Status::InternalError(err_msg); + } + const auto& tablet_schema_pb = rowset_meta_pb.tablet_schema(); + TabletSchema tablet_schema; + tablet_schema.init_from_pb(tablet_schema_pb); + + // copy segment files and index files + for (int64_t segment_index = 0; segment_index < num_segments; ++segment_index) { + std::string segment_file_path = _tablet->get_segment_filepath(rowset_id, segment_index); + auto snapshot_segment_file_path = + fmt::format("{}/{}_{}.binlog", full_path, rowset_id, segment_index); + + Status status = io::global_local_filesystem()->copy_path(segment_file_path, + snapshot_segment_file_path); + if (!status.ok()) { + LOG(WARNING) << "fail to copy binlog segment file. 
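// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): after copying
// the rowset files, the migration task serializes the collected
// RowsetBinlogMetasPB into <full_path>/rowset_binlog_metas.pb via write_pb,
// and load_tablet_from_dir later reads it back with read_pb. A hedged sketch
// of that round trip with the protobuf C++ API (the concrete message type is
// assumed to be the generated RowsetBinlogMetasPB; error handling is reduced
// to a bool):

#include <fstream>
#include <string>

#include <google/protobuf/message.h>

// Mirrors the role of Doris' write_pb helper: serialize a message to a file.
bool write_pb(const std::string& path, const google::protobuf::Message& msg) {
    std::ofstream out(path, std::ios::binary | std::ios::trunc);
    return out.is_open() && msg.SerializeToOstream(&out);
}

// Mirrors read_pb: parse the message back from the file.
bool read_pb(const std::string& path, google::protobuf::Message* msg) {
    std::ifstream in(path, std::ios::binary);
    return in.is_open() && msg->ParseFromIstream(&in);
}

// Usage (assuming the generated RowsetBinlogMetasPB message):
//   RowsetBinlogMetasPB metas;
//   ... fill metas while copying rowsets ...
//   if (metas.rowset_binlog_metas_size() > 0) {
//       write_pb(full_path + "/rowset_binlog_metas.pb", metas);
//   }
// ---------------------------------------------------------------------------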
[src=" << segment_file_path + << ", dest=" << snapshot_segment_file_path << "]" << status; + return status; + } + VLOG_DEBUG << "copy " << segment_file_path << " to " << snapshot_segment_file_path; + + if (tablet_schema.get_inverted_index_storage_format() == + InvertedIndexStorageFormatPB::V1) { + for (const auto& index : tablet_schema.indexes()) { + if (index.index_type() != IndexType::INVERTED) { + continue; + } + auto index_id = index.index_id(); + auto index_file = + _tablet->get_segment_index_filepath(rowset_id, segment_index, index_id); + auto snapshot_segment_index_file_path = + fmt::format("{}/{}_{}_{}.binlog-index", full_path, rowset_id, + segment_index, index_id); + VLOG_DEBUG << "copy " << index_file << " to " + << snapshot_segment_index_file_path; + status = io::global_local_filesystem()->copy_path( + index_file, snapshot_segment_index_file_path); + if (!status.ok()) { + LOG(WARNING) + << "fail to copy binlog index file. [src=" << index_file + << ", dest=" << snapshot_segment_index_file_path << "]" << status; + return status; + } + } + } else if (tablet_schema.has_inverted_index()) { + auto index_file = InvertedIndexDescriptor::get_index_file_path_v2( + InvertedIndexDescriptor::get_index_file_path_prefix(segment_file_path)); + auto snapshot_segment_index_file_path = + fmt::format("{}/{}_{}.binlog-index", full_path, rowset_id, segment_index); + VLOG_DEBUG << "copy " << index_file << " to " << snapshot_segment_index_file_path; + status = io::global_local_filesystem()->copy_path(index_file, + snapshot_segment_index_file_path); + if (!status.ok()) { + LOG(WARNING) << "fail to copy binlog index file. [src=" << index_file + << ", dest=" << snapshot_segment_index_file_path << "]" << status; + return status; + } + } + } } + + std::move(rowset_binlog_metas_pb.mutable_rowset_binlog_metas()->begin(), + rowset_binlog_metas_pb.mutable_rowset_binlog_metas()->end(), + google::protobuf::RepeatedFieldBackInserter( + all_binlog_metas_pb->mutable_rowset_binlog_metas())); + return Status::OK(); } diff --git a/be/src/olap/task/engine_storage_migration_task.h b/be/src/olap/task/engine_storage_migration_task.h index 8858854de921d4..7578b7de94f352 100644 --- a/be/src/olap/task/engine_storage_migration_task.h +++ b/be/src/olap/task/engine_storage_migration_task.h @@ -17,6 +17,8 @@ #pragma once +#include + #include #include #include @@ -69,7 +71,8 @@ class EngineStorageMigrationTask final : public EngineTask { // TODO: hkp // rewrite this function Status _copy_index_and_data_files(const std::string& full_path, - const std::vector& consistent_rowsets) const; + const std::vector& consistent_rowsets, + RowsetBinlogMetasPB* all_binlog_metas_pb) const; private: StorageEngine& _engine; diff --git a/be/src/olap/txn_manager.h b/be/src/olap/txn_manager.h index 5944bbf0fc3136..88ee97c5f6a3b9 100644 --- a/be/src/olap/txn_manager.h +++ b/be/src/olap/txn_manager.h @@ -282,13 +282,12 @@ class TxnManager { void _insert_txn_partition_map_unlocked(int64_t transaction_id, int64_t partition_id); void _clear_txn_partition_map_unlocked(int64_t transaction_id, int64_t partition_id); - class TabletVersionCache : public LRUCachePolicyTrackingManual { + class TabletVersionCache : public LRUCachePolicy { public: TabletVersionCache(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::TABLET_VERSION_CACHE, - capacity, LRUCacheType::NUMBER, -1, - DEFAULT_LRU_CACHE_NUM_SHARDS, - DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY, false) {} + : LRUCachePolicy(CachePolicy::CacheType::TABLET_VERSION_CACHE, capacity, + 
LRUCacheType::NUMBER, -1, DEFAULT_LRU_CACHE_NUM_SHARDS, + DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY, false) {} }; private: diff --git a/be/src/olap/version_graph.cpp b/be/src/olap/version_graph.cpp index abdf82d8953ebf..010e7ce9fd89a5 100644 --- a/be/src/olap/version_graph.cpp +++ b/be/src/olap/version_graph.cpp @@ -566,8 +566,9 @@ void VersionGraph::_add_vertex_to_graph(int64_t vertex_value) { Status VersionGraph::capture_consistent_versions(const Version& spec_version, std::vector* version_path) const { if (spec_version.first > spec_version.second) { - return Status::Error("invalid specified version. spec_version={}-{}", - spec_version.first, spec_version.second); + return Status::Error( + "invalid specified version. spec_version={}-{}", spec_version.first, + spec_version.second); } int64_t cur_idx = -1; @@ -579,8 +580,9 @@ Status VersionGraph::capture_consistent_versions(const Version& spec_version, } if (cur_idx < 0) { - return Status::InternalError("failed to find path in version_graph. spec_version: {}-{}", - spec_version.first, spec_version.second); + return Status::InternalError( + "failed to find path in version_graph. spec_version: {}-{}", spec_version.first, + spec_version.second); } int64_t end_value = spec_version.second + 1; @@ -609,8 +611,9 @@ Status VersionGraph::capture_consistent_versions(const Version& spec_version, } cur_idx = next_idx; } else { - return Status::InternalError("fail to find path in version_graph. spec_version: {}-{}", - spec_version.first, spec_version.second); + return Status::InternalError( + "fail to find path in version_graph. spec_version: {}-{}", spec_version.first, + spec_version.second); } } diff --git a/be/src/pipeline/exec/analytic_source_operator.cpp b/be/src/pipeline/exec/analytic_source_operator.cpp index b521a9b583fa94..b9e48727656e05 100644 --- a/be/src/pipeline/exec/analytic_source_operator.cpp +++ b/be/src/pipeline/exec/analytic_source_operator.cpp @@ -355,6 +355,7 @@ Status AnalyticLocalState::_get_next_for_rows(size_t current_block_rows) { 1; //going on calculate,add up data, no need to reset state } else { _reset_agg_status(); + range_end = _shared_state->current_row_position + _rows_end_offset + 1; if (!_parent->cast() ._window.__isset .window_start) { //[preceding, offset] --unbound: [preceding, following] @@ -362,7 +363,8 @@ Status AnalyticLocalState::_get_next_for_rows(size_t current_block_rows) { } else { range_start = _shared_state->current_row_position + _rows_start_offset; } - range_end = _shared_state->current_row_position + _rows_end_offset + 1; + // Make sure range_start <= range_end + range_start = std::min(range_start, range_end); } _executor.execute(_partition_by_start.pos, _shared_state->partition_by_end.pos, range_start, range_end); diff --git a/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp b/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp index 5127605097f4c5..70b73225f060e8 100644 --- a/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp +++ b/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp @@ -500,8 +500,12 @@ Status DistinctStreamingAggLocalState::close(RuntimeState* state) { _aggregated_block->clear(); // If the limit is reached, there may still be remaining data in the cache block. // If the limit is not reached, the cache block must be empty. - DCHECK(_reach_limit || _aggregated_block->empty()); - DCHECK(_reach_limit || _cache_block.empty()); + // If the query is canceled, it might not satisfy the above conditions. 
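// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): the
// analytic_source_operator fix computes range_end before range_start and then
// clamps range_start to range_end, so a ROWS frame whose start offset points
// past its end can never yield a negative-width range. A standalone version
// of the clamped frame computation (offsets and positions are plain ints):

#include <algorithm>
#include <cstdint>
#include <iostream>

struct RowsFrame {
    int64_t start; // first row of the frame (inclusive)
    int64_t end;   // one past the last row of the frame
};

RowsFrame compute_rows_frame(int64_t current_row, int64_t rows_start_offset,
                             int64_t rows_end_offset, bool has_window_start,
                             int64_t partition_start) {
    RowsFrame f;
    f.end = current_row + rows_end_offset + 1;
    // has_window_start == false means "UNBOUNDED PRECEDING".
    f.start = has_window_start ? current_row + rows_start_offset : partition_start;
    // Make sure start <= end, mirroring the new range_start = std::min(...).
    f.start = std::min(f.start, f.end);
    return f;
}

int main() {
    // Frame start offset beyond the end offset: clamped to an empty frame.
    RowsFrame f = compute_rows_frame(/*current_row=*/10, /*rows_start_offset=*/5,
                                     /*rows_end_offset=*/2, /*has_window_start=*/true,
                                     /*partition_start=*/0);
    std::cout << "[" << f.start << ", " << f.end << ")\n"; // prints [13, 13)
    return 0;
}
// ---------------------------------------------------------------------------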
+ if (!state->is_cancelled()) { + if (!_reach_limit && !_cache_block.empty()) { + LOG_WARNING("If the limit is not reached, the cache block must be empty."); + } + } _cache_block.clear(); return Base::close(state); } diff --git a/be/src/pipeline/exec/exchange_sink_operator.cpp b/be/src/pipeline/exec/exchange_sink_operator.cpp index 366b3c682f7dd5..aef8004d5bdc35 100644 --- a/be/src/pipeline/exec/exchange_sink_operator.cpp +++ b/be/src/pipeline/exec/exchange_sink_operator.cpp @@ -18,6 +18,7 @@ #include "exchange_sink_operator.h" #include +#include #include #include @@ -98,7 +99,7 @@ Status ExchangeSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& inf // Make sure brpc stub is ready before execution. for (int i = 0; i < channels.size(); ++i) { - RETURN_IF_ERROR(channels[i]->init_stub(state)); + RETURN_IF_ERROR(channels[i]->init(state)); _wait_channel_timer.push_back(_profile->add_nonzero_counter( fmt::format("WaitForLocalExchangeBuffer{}", i), TUnit ::TIME_NS, timer_name, 1)); } @@ -190,6 +191,10 @@ Status ExchangeSinkLocalState::open(RuntimeState* state) { std::make_unique(_vpartition.get(), find_tablet_mode); _tablet_sink_tuple_desc = _state->desc_tbl().get_tuple_descriptor(p._tablet_sink_tuple_id); _tablet_sink_row_desc = p._pool->add(new RowDescriptor(_tablet_sink_tuple_desc, false)); + _tablet_sink_expr_ctxs.resize(p._tablet_sink_expr_ctxs.size()); + for (size_t i = 0; i < _tablet_sink_expr_ctxs.size(); i++) { + RETURN_IF_ERROR(p._tablet_sink_expr_ctxs[i]->clone(state, _tablet_sink_expr_ctxs[i])); + } // if _part_type == TPartitionType::TABLET_SINK_SHUFFLE_PARTITIONED, we handle the processing of auto_increment column // on exchange node rather than on TabletWriter _block_convertor = @@ -206,7 +211,7 @@ Status ExchangeSinkLocalState::open(RuntimeState* state) { .txn_id = _txn_id, .pool = p._pool.get(), .location = _location, - .vec_output_expr_ctxs = &_fake_expr_ctxs, + .vec_output_expr_ctxs = &_tablet_sink_expr_ctxs, .schema = _schema, .caller = (void*)this, .create_partition_callback = &ExchangeSinkLocalState::empty_callback_function}); @@ -297,6 +302,7 @@ ExchangeSinkOperatorX::ExchangeSinkOperatorX( _tablet_sink_location(sink.tablet_sink_location), _tablet_sink_tuple_id(sink.tablet_sink_tuple_id), _tablet_sink_txn_id(sink.tablet_sink_txn_id), + _t_tablet_sink_exprs(&sink.tablet_sink_exprs), _enable_local_merge_sort(state->enable_local_merge_sort()) { DCHECK_GT(destinations.size(), 0); DCHECK(sink.output_partition.type == TPartitionType::UNPARTITIONED || @@ -309,6 +315,9 @@ ExchangeSinkOperatorX::ExchangeSinkOperatorX( sink.output_partition.type == TPartitionType::TABLE_SINK_RANDOM_PARTITIONED); _name = "ExchangeSinkOperatorX"; _pool = std::make_shared(); + if (sink.__isset.output_tuple_id) { + _output_tuple_id = sink.output_tuple_id; + } } Status ExchangeSinkOperatorX::init(const TDataSink& tsink) { @@ -316,6 +325,10 @@ Status ExchangeSinkOperatorX::init(const TDataSink& tsink) { if (_part_type == TPartitionType::RANGE_PARTITIONED) { return Status::InternalError("TPartitionType::RANGE_PARTITIONED should not be used"); } + if (_part_type == TPartitionType::TABLET_SINK_SHUFFLE_PARTITIONED) { + RETURN_IF_ERROR(vectorized::VExpr::create_expr_trees(*_t_tablet_sink_exprs, + _tablet_sink_expr_ctxs)); + } return Status::OK(); } @@ -324,6 +337,18 @@ Status ExchangeSinkOperatorX::open(RuntimeState* state) { _state = state; _mem_tracker = std::make_unique("ExchangeSinkOperatorX:"); _compression_type = state->fragement_transmission_compression_type(); + if (_part_type == 
TPartitionType::TABLET_SINK_SHUFFLE_PARTITIONED) { + if (_output_tuple_id == -1) { + RETURN_IF_ERROR( + vectorized::VExpr::prepare(_tablet_sink_expr_ctxs, state, _child->row_desc())); + } else { + auto* output_tuple_desc = state->desc_tbl().get_tuple_descriptor(_output_tuple_id); + auto* output_row_desc = _pool->add(new RowDescriptor(output_tuple_desc, false)); + RETURN_IF_ERROR( + vectorized::VExpr::prepare(_tablet_sink_expr_ctxs, state, *output_row_desc)); + } + RETURN_IF_ERROR(vectorized::VExpr::open(_tablet_sink_expr_ctxs, state)); + } return Status::OK(); } diff --git a/be/src/pipeline/exec/exchange_sink_operator.h b/be/src/pipeline/exec/exchange_sink_operator.h index dc07773d5ccecb..300e2a5172f3d1 100644 --- a/be/src/pipeline/exec/exchange_sink_operator.h +++ b/be/src/pipeline/exec/exchange_sink_operator.h @@ -183,7 +183,7 @@ class ExchangeSinkLocalState final : public PipelineXSinkLocalState<> { // for shuffle data by partition and tablet int64_t _txn_id = -1; - vectorized::VExprContextSPtrs _fake_expr_ctxs; + vectorized::VExprContextSPtrs _tablet_sink_expr_ctxs; std::unique_ptr _vpartition = nullptr; std::unique_ptr _tablet_finder = nullptr; std::shared_ptr _schema = nullptr; @@ -239,6 +239,7 @@ class ExchangeSinkOperatorX final : public DataSinkOperatorX _texprs; const RowDescriptor& _row_desc; + TTupleId _output_tuple_id = -1; TPartitionType::type _part_type; @@ -265,6 +266,8 @@ class ExchangeSinkOperatorX final : public DataSinkOperatorX _pool; + vectorized::VExprContextSPtrs _tablet_sink_expr_ctxs; + const std::vector* _t_tablet_sink_exprs = nullptr; // for external table sink random partition // Control the number of channels according to the flow, thereby controlling the number of table sink writers. diff --git a/be/src/pipeline/exec/olap_scan_operator.cpp b/be/src/pipeline/exec/olap_scan_operator.cpp index 172aa7a9c870c4..09e999d4737e12 100644 --- a/be/src/pipeline/exec/olap_scan_operator.cpp +++ b/be/src/pipeline/exec/olap_scan_operator.cpp @@ -257,10 +257,8 @@ Status OlapScanLocalState::_init_scanners(std::list* s } auto& p = _parent->cast(); - if (!p._olap_scan_node.output_column_unique_ids.empty()) { - for (auto uid : p._olap_scan_node.output_column_unique_ids) { - _maybe_read_column_ids.emplace(uid); - } + for (auto uid : p._olap_scan_node.output_column_unique_ids) { + _maybe_read_column_ids.emplace(uid); } // ranges constructed from scan keys @@ -482,9 +480,13 @@ Status OlapScanLocalState::_build_key_ranges_and_filters() { // we use `exact_range` to identify a key range is an exact range or not when we convert // it to `_scan_keys`. If `exact_range` is true, we can just discard it from `_olap_filters`. bool exact_range = true; + + // If the `_scan_keys` cannot extend by the range of column, should stop. + bool should_break = false; + bool eos = false; - for (int column_index = 0; - column_index < column_names.size() && !_scan_keys.has_range_value() && !eos; + for (int column_index = 0; column_index < column_names.size() && + !_scan_keys.has_range_value() && !eos && !should_break; ++column_index) { auto iter = _colname_to_value_range.find(column_names[column_index]); if (_colname_to_value_range.end() == iter) { @@ -498,8 +500,9 @@ Status OlapScanLocalState::_build_key_ranges_and_filters() { // but the original range may be converted to olap filters, if it's not a exact_range. 
auto temp_range = range; if (range.get_fixed_value_size() <= p._max_pushdown_conditions_per_column) { - RETURN_IF_ERROR(_scan_keys.extend_scan_key( - temp_range, p._max_scan_key_num, &exact_range, &eos)); + RETURN_IF_ERROR( + _scan_keys.extend_scan_key(temp_range, p._max_scan_key_num, + &exact_range, &eos, &should_break)); if (exact_range) { _colname_to_value_range.erase(iter->first); } @@ -507,8 +510,9 @@ Status OlapScanLocalState::_build_key_ranges_and_filters() { // if exceed max_pushdown_conditions_per_column, use whole_value_rang instead // and will not erase from _colname_to_value_range, it must be not exact_range temp_range.set_whole_value_range(); - RETURN_IF_ERROR(_scan_keys.extend_scan_key( - temp_range, p._max_scan_key_num, &exact_range, &eos)); + RETURN_IF_ERROR( + _scan_keys.extend_scan_key(temp_range, p._max_scan_key_num, + &exact_range, &eos, &should_break)); } return Status::OK(); }, diff --git a/be/src/pipeline/exec/partition_sort_sink_operator.cpp b/be/src/pipeline/exec/partition_sort_sink_operator.cpp index 94c51e160da2a2..3a850a40b13c66 100644 --- a/be/src/pipeline/exec/partition_sort_sink_operator.cpp +++ b/be/src/pipeline/exec/partition_sort_sink_operator.cpp @@ -17,6 +17,8 @@ #include "partition_sort_sink_operator.h" +#include + #include "common/status.h" #include "partition_sort_source_operator.h" #include "vec/common/hash_table/hash.h" @@ -107,8 +109,13 @@ Status PartitionSortSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo for (size_t i = 0; i < p._partition_expr_ctxs.size(); i++) { RETURN_IF_ERROR(p._partition_expr_ctxs[i]->clone(state, _partition_expr_ctxs[i])); } + _topn_phase = p._topn_phase; _partition_exprs_num = p._partition_exprs_num; _hash_table_size_counter = ADD_COUNTER(_profile, "HashTableSize", TUnit::UNIT); + _serialize_key_arena_memory_usage = + _profile->AddHighWaterMarkCounter("SerializeKeyArena", TUnit::BYTES, "MemoryUsage", 1); + _hash_table_memory_usage = + ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "HashTable", TUnit::BYTES, "MemoryUsage", 1); _build_timer = ADD_TIMER(_profile, "HashTableBuildTime"); _selector_block_timer = ADD_TIMER(_profile, "SelectorBlockTime"); _emplace_key_timer = ADD_TIMER(_profile, "EmplaceKeyTime"); @@ -119,6 +126,8 @@ Status PartitionSortSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo &_vsort_exec_exprs, p._limit, 0, p._pool, p._is_asc_order, p._nulls_first, p._child->row_desc(), state, _profile, p._has_global_limit, p._partition_inner_limit, p._top_n_algorithm, p._topn_phase); + _profile->add_info_string("PartitionTopNPhase", to_string(p._topn_phase)); + _profile->add_info_string("PartitionTopNLimit", std::to_string(p._partition_inner_limit)); RETURN_IF_ERROR(_init_hash_method()); return Status::OK(); } @@ -177,11 +186,7 @@ Status PartitionSortSinkOperatorX::sink(RuntimeState* state, vectorized::Block* } local_state._value_places[0]->append_whole_block(input_block, _child->row_desc()); } else { - //just simply use partition num to check - //if is TWO_PHASE_GLOBAL, must be sort all data thought partition num threshold have been exceeded. 
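// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): the
// olap_scan_operator change threads a should_break flag through
// extend_scan_key, so the loop over key columns stops as soon as a column's
// range can no longer be folded into the composite scan keys. The extension
// rule below ("stop when the key count would exceed max_scan_key_num") is a
// deliberate simplification of the real ScanKeys logic:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct ColumnRange {
    std::string column;
    std::vector<int> fixed_values; // simplified: only fixed-value ranges
};

// Try to extend every existing scan key with this column's values.
// Sets *should_break when the result would exceed max_scan_key_num.
void extend_scan_key(std::vector<std::vector<int>>* scan_keys, const ColumnRange& range,
                     size_t max_scan_key_num, bool* should_break) {
    size_t new_size = std::max<size_t>(scan_keys->size(), 1) * range.fixed_values.size();
    if (new_size > max_scan_key_num) {
        *should_break = true; // cannot extend; keep the keys we already have
        return;
    }
    if (scan_keys->empty()) scan_keys->push_back({});
    std::vector<std::vector<int>> extended;
    for (const auto& key : *scan_keys) {
        for (int v : range.fixed_values) {
            auto k = key;
            k.push_back(v);
            extended.push_back(std::move(k));
        }
    }
    *scan_keys = std::move(extended);
}

int main() {
    std::vector<std::vector<int>> scan_keys;
    std::vector<ColumnRange> key_columns = {{"k1", {1, 2}}, {"k2", {10, 20, 30}}, {"k3", {7}}};
    bool should_break = false;
    for (size_t i = 0; i < key_columns.size() && !should_break; ++i) {
        extend_scan_key(&scan_keys, key_columns[i], /*max_scan_key_num=*/4, &should_break);
    }
    std::cout << "scan keys built: " << scan_keys.size() << '\n'; // stops after k1 -> 2 keys
    return 0;
}
// ---------------------------------------------------------------------------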
- if (_topn_phase != TPartTopNPhase::TWO_PHASE_GLOBAL && - local_state._num_partition > config::partition_topn_partition_threshold && - local_state._sorted_partition_input_rows < 10000 * local_state._num_partition) { + if (local_state._is_need_passthrough) { { COUNTER_UPDATE(local_state._passthrough_rows_counter, (int64_t)current_rows); std::lock_guard lock(local_state._shared_state->buffer_mutex); @@ -193,8 +198,6 @@ Status PartitionSortSinkOperatorX::sink(RuntimeState* state, vectorized::Block* RETURN_IF_ERROR(_split_block_by_partition(input_block, local_state, eos)); RETURN_IF_CANCELLED(state); input_block->clear_column_data(); - local_state._sorted_partition_input_rows = - local_state._sorted_partition_input_rows + current_rows; } } } @@ -225,6 +228,8 @@ Status PartitionSortSinkOperatorX::sink(RuntimeState* state, vectorized::Block* local_state._shared_state->sink_eos = true; local_state._dependency->set_ready_to_read(); } + local_state._profile->add_info_string("HasPassThrough", + local_state._is_need_passthrough ? "Yes" : "No"); } return Status::OK(); @@ -245,7 +250,7 @@ Status PartitionSortSinkOperatorX::_split_block_by_partition( } Status PartitionSortSinkOperatorX::_emplace_into_hash_table( - const vectorized::ColumnRawPtrs& key_columns, const vectorized::Block* input_block, + const vectorized::ColumnRawPtrs& key_columns, vectorized::Block* input_block, PartitionSortSinkLocalState& local_state, bool eos) { return std::visit( vectorized::Overload { @@ -280,15 +285,37 @@ Status PartitionSortSinkOperatorX::_emplace_into_hash_table( }; SCOPED_TIMER(local_state._emplace_key_timer); - for (size_t row = 0; row < num_rows; ++row) { + int row = num_rows; + for (row = row - 1; row >= 0 && !local_state._is_need_passthrough; --row) { auto& mapped = agg_method.lazy_emplace(state, row, creator, creator_for_null_key); mapped->add_row_idx(row); + local_state._sorted_partition_input_rows++; + local_state._is_need_passthrough = + local_state.check_whether_need_passthrough(); } for (auto* place : local_state._value_places) { SCOPED_TIMER(local_state._selector_block_timer); RETURN_IF_ERROR(place->append_block_by_selector(input_block, eos)); } + if (local_state._is_need_passthrough) { + { + COUNTER_UPDATE(local_state._passthrough_rows_counter, + (int64_t)(num_rows - row)); + std::lock_guard lock( + local_state._shared_state->buffer_mutex); + // have emplace (num_rows - row) to hashtable, and now have row remaining needed in block; + input_block->set_num_rows(row); + local_state._shared_state->blocks_buffer.push( + std::move(*input_block)); + // buffer have data, source could read this. + local_state._dependency->set_ready_to_read(); + } + } + local_state._serialize_key_arena_memory_usage->set( + (int64_t)local_state._agg_arena_pool->size()); + COUNTER_SET(local_state._hash_table_memory_usage, + (int64_t)agg_method.hash_table->get_buffer_size_in_bytes()); return Status::OK(); }}, local_state._partitioned_data->method_variant); @@ -303,4 +330,20 @@ Status PartitionSortSinkLocalState::_init_hash_method() { return Status::OK(); } +// NOLINTBEGIN(readability-simplify-boolean-expr) +// just simply use partition num to check +// but if is TWO_PHASE_GLOBAL, must be sort all data thought partition num threshold have been exceeded. 
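// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): the partition
// sort sink now evaluates the passthrough condition while emplacing rows into
// the hash table; once too many partitions have shown up for too few rows,
// the remaining rows of the block skip partitioning and are pushed straight
// to the output buffer. A condensed model of that split (the partition key is
// just an int, and the thresholds stand in for partition_topn_max_partitions
// and partition_topn_per_partition_rows):

#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

struct PartitionTopNSink {
    size_t max_partitions = 3;
    size_t per_partition_rows = 2;
    std::unordered_map<int, std::vector<int>> partitions;
    size_t sorted_input_rows = 0;
    bool need_passthrough = false;

    bool check_whether_need_passthrough() const {
        return partitions.size() > max_partitions &&
               sorted_input_rows < per_partition_rows * partitions.size();
    }

    // Returns the rows that should bypass partitioning and go straight downstream.
    std::vector<int> sink(const std::vector<int>& keys) {
        std::vector<int> passthrough_rows;
        for (int key : keys) {
            if (need_passthrough) {
                passthrough_rows.push_back(key);
                continue;
            }
            partitions[key].push_back(key);
            ++sorted_input_rows;
            need_passthrough = check_whether_need_passthrough();
        }
        return passthrough_rows;
    }
};

int main() {
    PartitionTopNSink sink;
    auto passthrough = sink.sink({1, 2, 3, 4, 5, 6, 7, 8}); // every row opens a new partition
    std::cout << "partitioned rows: " << sink.sorted_input_rows
              << ", passthrough rows: " << passthrough.size() << '\n'; // 4 and 4
    return 0;
}
// ---------------------------------------------------------------------------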
+// partition_topn_max_partitions default is : 1024 +// partition_topn_per_partition_rows default is : 1000 +bool PartitionSortSinkLocalState::check_whether_need_passthrough() { + if (_topn_phase != TPartTopNPhase::TWO_PHASE_GLOBAL && + _num_partition > _state->partition_topn_max_partitions() && + _sorted_partition_input_rows < + _state->partition_topn_per_partition_rows() * _num_partition) { + return true; + } + return false; +} +// NOLINTEND(readability-simplify-boolean-expr) + } // namespace doris::pipeline diff --git a/be/src/pipeline/exec/partition_sort_sink_operator.h b/be/src/pipeline/exec/partition_sort_sink_operator.h index 107017e66188b5..f16df509dca4a0 100644 --- a/be/src/pipeline/exec/partition_sort_sink_operator.h +++ b/be/src/pipeline/exec/partition_sort_sink_operator.h @@ -66,7 +66,12 @@ struct PartitionSortInfo { }; static constexpr size_t INITIAL_BUFFERED_BLOCK_BYTES = 64 << 20; + +#ifndef NDEBUG +static constexpr size_t PARTITION_SORT_ROWS_THRESHOLD = 10; +#else static constexpr size_t PARTITION_SORT_ROWS_THRESHOLD = 20000; +#endif struct PartitionBlocks { public: @@ -234,6 +239,8 @@ class PartitionSortSinkLocalState : public PipelineXSinkLocalState _agg_arena_pool; int _partition_exprs_num = 0; std::shared_ptr _partition_sort_info = nullptr; + TPartTopNPhase::type _topn_phase; + bool _is_need_passthrough = false; RuntimeProfile::Counter* _build_timer = nullptr; RuntimeProfile::Counter* _emplace_key_timer = nullptr; @@ -241,7 +248,10 @@ class PartitionSortSinkLocalState : public PipelineXSinkLocalState { @@ -284,7 +294,7 @@ class PartitionSortSinkOperatorX final : public DataSinkOperatorXpartition_sorts.size()) { RETURN_IF_ERROR(local_state._shared_state->partition_sorts[local_state._sort_idx]->get_next( state, output_block, ¤t_eos)); + COUNTER_UPDATE(local_state._sorted_partition_output_rows_counter, output_block->rows()); } if (current_eos) { - //current sort have eos, so get next idx - auto rows = local_state._shared_state->partition_sorts[local_state._sort_idx] - ->get_output_rows(); - COUNTER_UPDATE(local_state._sorted_partition_output_rows_counter, rows); + // current sort have eos, so get next idx local_state._shared_state->partition_sorts[local_state._sort_idx].reset(nullptr); local_state._sort_idx++; } diff --git a/be/src/pipeline/exec/result_file_sink_operator.cpp b/be/src/pipeline/exec/result_file_sink_operator.cpp index 20918ab81ce818..8a47e2402bc022 100644 --- a/be/src/pipeline/exec/result_file_sink_operator.cpp +++ b/be/src/pipeline/exec/result_file_sink_operator.cpp @@ -123,7 +123,7 @@ Status ResultFileSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& i shuffle(_channels.begin(), _channels.end(), g); for (auto& _channel : _channels) { - RETURN_IF_ERROR(_channel->init_stub(state)); + RETURN_IF_ERROR(_channel->init(state)); } } _writer->set_header_info(p._header_type, p._header); diff --git a/be/src/pipeline/exec/scan_operator.cpp b/be/src/pipeline/exec/scan_operator.cpp index eb30d62495d485..507039b1f5e8bd 100644 --- a/be/src/pipeline/exec/scan_operator.cpp +++ b/be/src/pipeline/exec/scan_operator.cpp @@ -996,7 +996,16 @@ Status ScanLocalState::_start_scanners( auto& p = _parent->cast(); _scanner_ctx = vectorized::ScannerContext::create_shared( state(), this, p._output_tuple_desc, p.output_row_descriptor(), scanners, p.limit(), - _scan_dependency, p.ignore_data_distribution()); + state()->scan_queue_mem_limit(), _scan_dependency, + // NOTE: This will logic makes _max_thread_num of ScannerContext to be C(num of cores) * 2 + // For a query with C/2 
instance and M scan node, scan task of this query will be C/2 * M * C*2 + // and will be C*C*N at most. + // 1. If data distribution is ignored , we use 1 instance to scan. + // 2. Else if this operator is not file scan operator, we use config::doris_scanner_thread_pool_thread_num scanners to scan. + // 3. Else, file scanner will consume much memory so we use config::doris_scanner_thread_pool_thread_num / query_parallel_instance_num scanners to scan. + p.ignore_data_distribution() || !p.is_file_scan_operator() + ? 1 + : state()->query_parallel_instance_num()); return Status::OK(); } @@ -1191,18 +1200,12 @@ Status ScanOperatorX::init(const TPlanNode& tnode, RuntimeState* } } } else { - DCHECK(query_options.__isset.adaptive_pipeline_task_serial_read_on_limit); // The set of enable_adaptive_pipeline_task_serial_read_on_limit // is checked in previous branch. if (query_options.enable_adaptive_pipeline_task_serial_read_on_limit) { - int32_t adaptive_pipeline_task_serial_read_on_limit = - ADAPTIVE_PIPELINE_TASK_SERIAL_READ_ON_LIMIT_DEFAULT; - if (query_options.__isset.adaptive_pipeline_task_serial_read_on_limit) { - adaptive_pipeline_task_serial_read_on_limit = - query_options.adaptive_pipeline_task_serial_read_on_limit; - } - - if (tnode.limit > 0 && tnode.limit <= adaptive_pipeline_task_serial_read_on_limit) { + DCHECK(query_options.__isset.adaptive_pipeline_task_serial_read_on_limit); + if (tnode.limit > 0 && + tnode.limit <= query_options.adaptive_pipeline_task_serial_read_on_limit) { _should_run_serial = true; } } diff --git a/be/src/pipeline/pipeline.h b/be/src/pipeline/pipeline.h index c014a090170f48..dfeb53ae006116 100644 --- a/be/src/pipeline/pipeline.h +++ b/be/src/pipeline/pipeline.h @@ -105,7 +105,6 @@ class Pipeline : public std::enable_shared_from_this { void set_children(std::vector> children) { _children = children; } void incr_created_tasks() { _num_tasks_created++; } - bool need_to_create_task() const { return _num_tasks > _num_tasks_created; } void set_num_tasks(int num_tasks) { _num_tasks = num_tasks; for (auto& op : _operators) { @@ -158,7 +157,7 @@ class Pipeline : public std::enable_shared_from_this { // How many tasks should be created ? int _num_tasks = 1; // How many tasks are already created? 
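// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): the
// _start_scanners hunk passes an extra parallelism hint to ScannerContext: 1
// when data distribution is ignored or the node is not a file scan operator,
// otherwise the query's parallel instance count, so memory-hungry file
// scanners split the scanner-thread budget across instances. The decision
// reduces to a small helper:

#include <iostream>

int scanner_parallelism(bool ignore_data_distribution, bool is_file_scan_operator,
                        int query_parallel_instance_num) {
    if (ignore_data_distribution || !is_file_scan_operator) {
        return 1;
    }
    return query_parallel_instance_num;
}

int main() {
    std::cout << scanner_parallelism(false, true, 8) << ' '   // file scan: split by instances
              << scanner_parallelism(false, false, 8) << ' '  // olap scan: 1
              << scanner_parallelism(true, true, 8) << '\n';  // ignored distribution: 1
    return 0;
}
// ---------------------------------------------------------------------------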
- int _num_tasks_created = 0; + std::atomic _num_tasks_created = 0; }; } // namespace doris::pipeline diff --git a/be/src/pipeline/pipeline_fragment_context.cpp b/be/src/pipeline/pipeline_fragment_context.cpp index 3513c2ba176278..e3d7f56d8f31f7 100644 --- a/be/src/pipeline/pipeline_fragment_context.cpp +++ b/be/src/pipeline/pipeline_fragment_context.cpp @@ -139,8 +139,10 @@ PipelineFragmentContext::~PipelineFragmentContext() { } } _tasks.clear(); - for (auto& runtime_state : _task_runtime_states) { - runtime_state.reset(); + for (auto& runtime_states : _task_runtime_states) { + for (auto& runtime_state : runtime_states) { + runtime_state.reset(); + } } _pipelines.clear(); _sink.reset(); @@ -231,7 +233,8 @@ PipelinePtr PipelineFragmentContext::add_pipeline(PipelinePtr parent, int idx) { return pipeline; } -Status PipelineFragmentContext::prepare(const doris::TPipelineFragmentParams& request) { +Status PipelineFragmentContext::prepare(const doris::TPipelineFragmentParams& request, + ThreadPool* thread_pool) { if (_prepared) { return Status::InternalError("Already prepared"); } @@ -239,14 +242,14 @@ Status PipelineFragmentContext::prepare(const doris::TPipelineFragmentParams& re _timeout = request.query_options.execution_timeout; } - _runtime_profile = std::make_unique("PipelineContext"); - _prepare_timer = ADD_TIMER(_runtime_profile, "PrepareTime"); + _fragment_level_profile = std::make_unique("PipelineContext"); + _prepare_timer = ADD_TIMER(_fragment_level_profile, "PrepareTime"); SCOPED_TIMER(_prepare_timer); - _build_pipelines_timer = ADD_TIMER(_runtime_profile, "BuildPipelinesTime"); - _init_context_timer = ADD_TIMER(_runtime_profile, "InitContextTime"); - _plan_local_shuffle_timer = ADD_TIMER(_runtime_profile, "PlanLocalShuffleTime"); - _build_tasks_timer = ADD_TIMER(_runtime_profile, "BuildTasksTime"); - _prepare_all_pipelines_timer = ADD_TIMER(_runtime_profile, "PrepareAllPipelinesTime"); + _build_pipelines_timer = ADD_TIMER(_fragment_level_profile, "BuildPipelinesTime"); + _init_context_timer = ADD_TIMER(_fragment_level_profile, "InitContextTime"); + _plan_local_shuffle_timer = ADD_TIMER(_fragment_level_profile, "PlanLocalShuffleTime"); + _build_tasks_timer = ADD_TIMER(_fragment_level_profile, "BuildTasksTime"); + _prepare_all_pipelines_timer = ADD_TIMER(_fragment_level_profile, "PrepareAllPipelinesTime"); { SCOPED_TIMER(_init_context_timer); _num_instances = request.local_params.size(); @@ -348,7 +351,7 @@ Status PipelineFragmentContext::prepare(const doris::TPipelineFragmentParams& re { SCOPED_TIMER(_build_tasks_timer); // 5. Build pipeline tasks and initialize local state. 
- RETURN_IF_ERROR(_build_pipeline_tasks(request)); + RETURN_IF_ERROR(_build_pipeline_tasks(request, thread_pool)); } _init_next_report_time(); @@ -357,17 +360,23 @@ Status PipelineFragmentContext::prepare(const doris::TPipelineFragmentParams& re return Status::OK(); } -Status PipelineFragmentContext::_build_pipeline_tasks( - const doris::TPipelineFragmentParams& request) { +Status PipelineFragmentContext::_build_pipeline_tasks(const doris::TPipelineFragmentParams& request, + ThreadPool* thread_pool) { _total_tasks = 0; - int target_size = request.local_params.size(); + const auto target_size = request.local_params.size(); _tasks.resize(target_size); + _fragment_instance_ids.resize(target_size); + _runtime_filter_states.resize(target_size); + _task_runtime_states.resize(_pipelines.size()); + for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) { + _task_runtime_states[pip_idx].resize(_pipelines[pip_idx]->num_tasks()); + } auto pipeline_id_to_profile = _runtime_state->build_pipeline_profile(_pipelines.size()); - for (size_t i = 0; i < target_size; i++) { + auto pre_and_submit = [&](int i, PipelineFragmentContext* ctx) { const auto& local_params = request.local_params[i]; auto fragment_instance_id = local_params.fragment_instance_id; - _fragment_instance_ids.push_back(fragment_instance_id); + _fragment_instance_ids[i] = fragment_instance_id; std::unique_ptr runtime_filter_mgr; auto init_runtime_state = [&](std::unique_ptr& runtime_state) { runtime_state->set_query_mem_tracker(_query_ctx->query_mem_tracker); @@ -426,7 +435,7 @@ Status PipelineFragmentContext::_build_pipeline_tasks( filterparams->runtime_filter_mgr = runtime_filter_mgr.get(); - _runtime_filter_states.push_back(std::move(filterparams)); + _runtime_filter_states[i] = std::move(filterparams); std::map pipeline_id_to_task; auto get_local_exchange_state = [&](PipelinePtr pipeline) -> std::map, @@ -449,13 +458,15 @@ Status PipelineFragmentContext::_build_pipeline_tasks( for (size_t pip_idx = 0; pip_idx < _pipelines.size(); pip_idx++) { auto& pipeline = _pipelines[pip_idx]; - if (pipeline->need_to_create_task()) { - // build task runtime state - _task_runtime_states.push_back(RuntimeState::create_unique( + if (pipeline->num_tasks() > 1 || i == 0) { + DCHECK(_task_runtime_states[pip_idx][i] == nullptr) + << print_id(_task_runtime_states[pip_idx][i]->fragment_instance_id()) << " " + << pipeline->debug_string(); + _task_runtime_states[pip_idx][i] = RuntimeState::create_unique( this, local_params.fragment_instance_id, request.query_id, request.fragment_id, request.query_options, _query_ctx->query_globals, - _exec_env, _query_ctx.get())); - auto& task_runtime_state = _task_runtime_states.back(); + _exec_env, _query_ctx.get()); + auto& task_runtime_state = _task_runtime_states[pip_idx][i]; init_runtime_state(task_runtime_state); auto cur_task_id = _total_tasks++; task_runtime_state->set_task_id(cur_task_id); @@ -529,6 +540,39 @@ Status PipelineFragmentContext::_build_pipeline_tasks( std::lock_guard l(_state_map_lock); _runtime_filter_mgr_map[fragment_instance_id] = std::move(runtime_filter_mgr); } + return Status::OK(); + }; + if (target_size > 1 && + (_runtime_state->query_options().__isset.parallel_prepare_threshold && + target_size > _runtime_state->query_options().parallel_prepare_threshold)) { + std::vector prepare_status(target_size); + std::mutex m; + std::condition_variable cv; + int prepare_done = 0; + for (size_t i = 0; i < target_size; i++) { + RETURN_IF_ERROR(thread_pool->submit_func([&, i]() { + 
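// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch):
// _build_pipeline_tasks now pre-sizes _task_runtime_states as one vector per
// pipeline (each sized to that pipeline's task count) and fills slot
// [pip_idx][i] only when the pipeline actually creates a task for instance i,
// instead of pushing onto a flat list. A minimal model of that indexing with
// placeholder RuntimeState/Pipeline types:

#include <iostream>
#include <memory>
#include <vector>

struct RuntimeState { int task_id = -1; }; // placeholder
struct Pipeline { int num_tasks = 1; };    // placeholder

int main() {
    std::vector<Pipeline> pipelines = {{1}, {4}, {4}}; // one serial, two parallel pipelines
    const int num_instances = 4;
    int total_tasks = 0;

    // One row per pipeline, one slot per task that may be created.
    std::vector<std::vector<std::unique_ptr<RuntimeState>>> task_runtime_states(pipelines.size());
    for (size_t pip = 0; pip < pipelines.size(); ++pip) {
        task_runtime_states[pip].resize(pipelines[pip].num_tasks);
    }

    for (int i = 0; i < num_instances; ++i) {          // per-instance "pre_and_submit"
        for (size_t pip = 0; pip < pipelines.size(); ++pip) {
            // A pipeline with a single task only materializes it for instance 0.
            if (pipelines[pip].num_tasks > 1 || i == 0) {
                task_runtime_states[pip][i] = std::make_unique<RuntimeState>();
                task_runtime_states[pip][i]->task_id = total_tasks++;
            }
        }
    }
    std::cout << "tasks created: " << total_tasks << '\n'; // 1 + 4 + 4 = 9
    return 0;
}
// ---------------------------------------------------------------------------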
SCOPED_ATTACH_TASK(_query_ctx.get()); + prepare_status[i] = pre_and_submit(i, this); + std::unique_lock lock(m); + prepare_done++; + if (prepare_done == target_size) { + cv.notify_one(); + } + })); + } + std::unique_lock lock(m); + if (prepare_done != target_size) { + cv.wait(lock); + for (size_t i = 0; i < target_size; i++) { + if (!prepare_status[i].ok()) { + return prepare_status[i]; + } + } + } + } else { + for (size_t i = 0; i < target_size; i++) { + RETURN_IF_ERROR(pre_and_submit(i, this)); + } } _pipeline_parent_map.clear(); _dag.clear(); @@ -1683,7 +1727,7 @@ void PipelineFragmentContext::_close_fragment_instance() { return; } Defer defer_op {[&]() { _is_fragment_instance_closed = true; }}; - _runtime_profile->total_time_counter()->update(_fragment_watcher.elapsed_time()); + _fragment_level_profile->total_time_counter()->update(_fragment_watcher.elapsed_time()); static_cast(send_report(true)); // Print profile content in info log is a tempoeray solution for stream load and external_connector. // Since stream load does not have someting like coordinator on FE, so @@ -1749,14 +1793,16 @@ Status PipelineFragmentContext::send_report(bool done) { std::vector runtime_states; - for (auto& task_state : _task_runtime_states) { - runtime_states.push_back(task_state.get()); + for (auto& task_states : _task_runtime_states) { + for (auto& task_state : task_states) { + if (task_state) { + runtime_states.push_back(task_state.get()); + } + } } ReportStatusRequest req {exec_status, runtime_states, - _runtime_profile.get(), - _runtime_state->load_channel_profile(), done || !exec_status.ok(), _query_ctx->coord_addr, _query_id, @@ -1798,6 +1844,11 @@ PipelineFragmentContext::collect_realtime_profile() const { return res; } + // Make sure first profile is fragment level profile + auto fragment_profile = std::make_shared(); + _fragment_level_profile->to_thrift(fragment_profile.get()); + res.push_back(fragment_profile); + // pipeline_id_to_profile is initialized in prepare stage for (auto pipeline_profile : _runtime_state->pipeline_id_to_profile()) { auto profile_ptr = std::make_shared(); @@ -1821,15 +1872,17 @@ PipelineFragmentContext::collect_realtime_load_channel_profile() const { return nullptr; } - for (auto& runtime_state : _task_runtime_states) { - if (runtime_state->runtime_profile() == nullptr) { - continue; - } + for (auto& runtime_states : _task_runtime_states) { + for (auto& runtime_state : runtime_states) { + if (runtime_state->runtime_profile() == nullptr) { + continue; + } - auto tmp_load_channel_profile = std::make_shared(); + auto tmp_load_channel_profile = std::make_shared(); - runtime_state->runtime_profile()->to_thrift(tmp_load_channel_profile.get()); - this->_runtime_state->load_channel_profile()->update(*tmp_load_channel_profile); + runtime_state->runtime_profile()->to_thrift(tmp_load_channel_profile.get()); + this->_runtime_state->load_channel_profile()->update(*tmp_load_channel_profile); + } } auto load_channel_profile = std::make_shared(); diff --git a/be/src/pipeline/pipeline_fragment_context.h b/be/src/pipeline/pipeline_fragment_context.h index f46835e95e0647..c221d076455b78 100644 --- a/be/src/pipeline/pipeline_fragment_context.h +++ b/be/src/pipeline/pipeline_fragment_context.h @@ -88,7 +88,7 @@ class PipelineFragmentContext : public TaskExecutionContext { // should be protected by lock? 
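// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the patch): when the
// instance count exceeds parallel_prepare_threshold, the per-instance
// preparation lambdas are submitted to a thread pool, the caller blocks on a
// condition variable until every instance reports in, and the collected
// statuses are checked afterwards. A self-contained version of that
// fan-out/fan-in using std::thread in place of the BE ThreadPool and a bool
// in place of Status (the real workers also attach the query context):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Stand-in for preparing one fragment instance; returns false on failure.
bool pre_and_submit(int instance_idx) {
    return instance_idx != 2; // pretend instance 2 fails, to exercise the error path
}

int main() {
    const int target_size = 4;
    std::vector<bool> prepare_status(target_size, false);
    std::mutex m;
    std::condition_variable cv;
    int prepare_done = 0;

    std::vector<std::thread> workers;
    for (int i = 0; i < target_size; ++i) {
        workers.emplace_back([&, i]() {
            bool ok = pre_and_submit(i);
            std::lock_guard<std::mutex> lock(m);
            prepare_status[i] = ok;
            if (++prepare_done == target_size) cv.notify_one();
        });
    }

    {
        // Wait until every instance has finished preparing.
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [&] { return prepare_done == target_size; });
    }
    for (auto& t : workers) t.join();

    for (int i = 0; i < target_size; ++i) {
        if (!prepare_status[i]) {
            std::cout << "prepare failed for instance " << i << '\n';
            return 1;
        }
    }
    std::cout << "all instances prepared\n";
    return 0;
}
// ---------------------------------------------------------------------------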
[[nodiscard]] bool is_canceled() const { return _runtime_state->is_cancelled(); } - Status prepare(const doris::TPipelineFragmentParams& request); + Status prepare(const doris::TPipelineFragmentParams& request, ThreadPool* thread_pool); Status submit(); @@ -187,7 +187,8 @@ class PipelineFragmentContext : public TaskExecutionContext { bool _enable_local_shuffle() const { return _runtime_state->enable_local_shuffle(); } - Status _build_pipeline_tasks(const doris::TPipelineFragmentParams& request); + Status _build_pipeline_tasks(const doris::TPipelineFragmentParams& request, + ThreadPool* thread_pool); void _close_fragment_instance(); void _init_next_report_time(); @@ -206,9 +207,9 @@ class PipelineFragmentContext : public TaskExecutionContext { int _closed_tasks = 0; // After prepared, `_total_tasks` is equal to the size of `_tasks`. // When submit fail, `_total_tasks` is equal to the number of tasks submitted. - int _total_tasks = 0; + std::atomic _total_tasks = 0; - std::unique_ptr _runtime_profile; + std::unique_ptr _fragment_level_profile; bool _is_report_success = false; std::unique_ptr _runtime_state; @@ -303,7 +304,7 @@ class PipelineFragmentContext : public TaskExecutionContext { std::vector _fragment_instance_ids; // Local runtime states for each task - std::vector> _task_runtime_states; + std::vector>> _task_runtime_states; std::vector> _runtime_filter_states; diff --git a/be/src/pipeline/query_cache/query_cache.cpp b/be/src/pipeline/query_cache/query_cache.cpp index e6d41ecaba5893..20e342e140f156 100644 --- a/be/src/pipeline/query_cache/query_cache.cpp +++ b/be/src/pipeline/query_cache/query_cache.cpp @@ -49,9 +49,8 @@ void QueryCache::insert(const CacheKey& key, int64_t version, CacheResult& res, auto cache_value_ptr = std::make_unique(version, std::move(cache_result), slot_orders); - QueryCacheHandle(this, LRUCachePolicyTrackingManual::insert( - key, (void*)cache_value_ptr.release(), cache_size, cache_size, - CachePriority::NORMAL)); + QueryCacheHandle(this, LRUCachePolicy::insert(key, (void*)cache_value_ptr.release(), cache_size, + cache_size, CachePriority::NORMAL)); } bool QueryCache::lookup(const CacheKey& key, int64_t version, doris::QueryCacheHandle* handle) { diff --git a/be/src/pipeline/query_cache/query_cache.h b/be/src/pipeline/query_cache/query_cache.h index 6ec00b91f7816c..a905831b530578 100644 --- a/be/src/pipeline/query_cache/query_cache.h +++ b/be/src/pipeline/query_cache/query_cache.h @@ -86,9 +86,9 @@ class QueryCacheHandle { DISALLOW_COPY_AND_ASSIGN(QueryCacheHandle); }; -class QueryCache : public LRUCachePolicyTrackingManual { +class QueryCache : public LRUCachePolicy { public: - using LRUCachePolicyTrackingManual::insert; + using LRUCachePolicy::insert; struct CacheValue : public LRUCacheValueBase { int64_t version; @@ -140,8 +140,8 @@ class QueryCache : public LRUCachePolicyTrackingManual { QueryCache() = delete; QueryCache(size_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::QUERY_CACHE, capacity, - LRUCacheType::SIZE, 3600 * 24, num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::QUERY_CACHE, capacity, LRUCacheType::SIZE, + 3600 * 24, num_shards) {} bool lookup(const CacheKey& key, int64_t version, QueryCacheHandle* handle); diff --git a/be/src/runtime/exec_env.h b/be/src/runtime/exec_env.h index 38fcaceb47953a..61cebad10b9e78 100644 --- a/be/src/runtime/exec_env.h +++ b/be/src/runtime/exec_env.h @@ -78,6 +78,7 @@ class LoadPathMgr; class NewLoadStreamMgr; class MemTrackerLimiter; class MemTracker; +struct 
TrackerLimiterGroup; class BaseStorageEngine; class ResultBufferMgr; class ResultQueueMgr; @@ -174,9 +175,10 @@ class ExecEnv { std::vector mem_tracker_limiter_pool; void init_mem_tracker(); std::shared_ptr orphan_mem_tracker() { return _orphan_mem_tracker; } - MemTrackerLimiter* details_mem_tracker_set() { return _details_mem_tracker_set.get(); } std::shared_ptr page_no_cache_mem_tracker() { return _page_no_cache_mem_tracker; } - MemTracker* brpc_iobuf_block_memory_tracker() { return _brpc_iobuf_block_memory_tracker.get(); } + std::shared_ptr brpc_iobuf_block_memory_tracker() { + return _brpc_iobuf_block_memory_tracker; + } std::shared_ptr segcompaction_mem_tracker() { return _segcompaction_mem_tracker; } @@ -359,10 +361,9 @@ class ExecEnv { // Ideally, all threads are expected to attach to the specified tracker, so that "all memory has its own ownership", // and the consumption of the orphan mem tracker is close to 0, but greater than 0. std::shared_ptr _orphan_mem_tracker; - std::shared_ptr _details_mem_tracker_set; // page size not in cache, data page/index page/etc. std::shared_ptr _page_no_cache_mem_tracker; - std::shared_ptr _brpc_iobuf_block_memory_tracker; + std::shared_ptr _brpc_iobuf_block_memory_tracker; // Count the memory consumption of segment compaction tasks. std::shared_ptr _segcompaction_mem_tracker; std::shared_ptr _stream_load_pipe_tracker; diff --git a/be/src/runtime/exec_env_init.cpp b/be/src/runtime/exec_env_init.cpp index a69709d24d2466..758a2f3760c7a7 100644 --- a/be/src/runtime/exec_env_init.cpp +++ b/be/src/runtime/exec_env_init.cpp @@ -591,19 +591,16 @@ void ExecEnv::init_mem_tracker() { _s_tracking_memory = true; _orphan_mem_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "Orphan"); - _details_mem_tracker_set = - MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "DetailsTrackerSet"); - _page_no_cache_mem_tracker = - std::make_shared("PageNoCache", _details_mem_tracker_set.get()); + _page_no_cache_mem_tracker = std::make_shared("PageNoCache"); _brpc_iobuf_block_memory_tracker = - std::make_shared("IOBufBlockMemory", _details_mem_tracker_set.get()); + MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "IOBufBlockMemory"); _segcompaction_mem_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "SegCompaction"); _point_query_executor_mem_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "PointQueryExecutor"); _query_cache_mem_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "QueryCache"); - _block_compression_mem_tracker = _block_compression_mem_tracker = + _block_compression_mem_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "BlockCompression"); _rowid_storage_reader_tracker = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::GLOBAL, "RowIdStorageReader"); diff --git a/be/src/runtime/fragment_mgr.cpp b/be/src/runtime/fragment_mgr.cpp index 4af6b72a220c78..7a4687b50d12f3 100644 --- a/be/src/runtime/fragment_mgr.cpp +++ b/be/src/runtime/fragment_mgr.cpp @@ -322,211 +322,164 @@ void FragmentMgr::coordinator_callback(const ReportStatusRequest& req) { params.__set_status(exec_status.to_thrift()); params.__set_done(req.done); params.__set_query_type(req.runtime_state->query_type()); + params.__isset.profile = false; DCHECK(req.runtime_state != nullptr); - if (req.runtime_state->query_type() == TQueryType::LOAD && !req.done && req.status.ok()) { - // this is a load plan, and load is not finished, just 
make a brief report + if (req.runtime_state->query_type() == TQueryType::LOAD) { params.__set_loaded_rows(req.runtime_state->num_rows_load_total()); params.__set_loaded_bytes(req.runtime_state->num_bytes_load_total()); } else { - if (req.runtime_state->query_type() == TQueryType::LOAD) { - params.__set_loaded_rows(req.runtime_state->num_rows_load_total()); - params.__set_loaded_bytes(req.runtime_state->num_bytes_load_total()); - } - params.__isset.detailed_report = true; DCHECK(!req.runtime_states.empty()); - const bool enable_profile = (*req.runtime_states.begin())->enable_profile(); - if (enable_profile) { - params.__isset.profile = true; - params.__isset.loadChannelProfile = false; - for (auto* rs : req.runtime_states) { - DCHECK(req.load_channel_profile); - TDetailedReportParams detailed_param; - rs->load_channel_profile()->to_thrift(&detailed_param.loadChannelProfile); - // merge all runtime_states.loadChannelProfile to req.load_channel_profile - req.load_channel_profile->update(detailed_param.loadChannelProfile); - } - req.load_channel_profile->to_thrift(¶ms.loadChannelProfile); - } else { - params.__isset.profile = false; - } - - if (enable_profile) { - DCHECK(req.profile != nullptr); - TDetailedReportParams detailed_param; - detailed_param.__isset.fragment_instance_id = false; - detailed_param.__isset.profile = true; - detailed_param.__isset.loadChannelProfile = false; - detailed_param.__set_is_fragment_level(true); - req.profile->to_thrift(&detailed_param.profile); - params.detailed_report.push_back(detailed_param); - for (auto pipeline_profile : req.runtime_state->pipeline_id_to_profile()) { - TDetailedReportParams detailed_param; - detailed_param.__isset.fragment_instance_id = false; - detailed_param.__isset.profile = true; - detailed_param.__isset.loadChannelProfile = false; - pipeline_profile->to_thrift(&detailed_param.profile); - params.detailed_report.push_back(std::move(detailed_param)); - } - } if (!req.runtime_state->output_files().empty()) { params.__isset.delta_urls = true; for (auto& it : req.runtime_state->output_files()) { params.delta_urls.push_back(to_http_path(it)); } - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - for (auto& it : rs->output_files()) { - params.delta_urls.push_back(to_http_path(it)); - } - } - if (!params.delta_urls.empty()) { - params.__isset.delta_urls = true; - } } + if (!params.delta_urls.empty()) { + params.__isset.delta_urls = true; + } + } - // load rows - static std::string s_dpp_normal_all = "dpp.norm.ALL"; - static std::string s_dpp_abnormal_all = "dpp.abnorm.ALL"; - static std::string s_unselected_rows = "unselected.rows"; - int64_t num_rows_load_success = 0; - int64_t num_rows_load_filtered = 0; - int64_t num_rows_load_unselected = 0; - if (req.runtime_state->num_rows_load_total() > 0 || - req.runtime_state->num_rows_load_filtered() > 0 || - req.runtime_state->num_finished_range() > 0) { - params.__isset.load_counters = true; - - num_rows_load_success = req.runtime_state->num_rows_load_success(); - num_rows_load_filtered = req.runtime_state->num_rows_load_filtered(); - num_rows_load_unselected = req.runtime_state->num_rows_load_unselected(); - params.__isset.fragment_instance_reports = true; - TFragmentInstanceReport t; - t.__set_fragment_instance_id(req.runtime_state->fragment_instance_id()); - t.__set_num_finished_range(req.runtime_state->num_finished_range()); - params.fragment_instance_reports.push_back(t); - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if 
(rs->num_rows_load_total() > 0 || rs->num_rows_load_filtered() > 0 || - req.runtime_state->num_finished_range() > 0) { - params.__isset.load_counters = true; - num_rows_load_success += rs->num_rows_load_success(); - num_rows_load_filtered += rs->num_rows_load_filtered(); - num_rows_load_unselected += rs->num_rows_load_unselected(); - params.__isset.fragment_instance_reports = true; - TFragmentInstanceReport t; - t.__set_fragment_instance_id(rs->fragment_instance_id()); - t.__set_num_finished_range(rs->num_finished_range()); - params.fragment_instance_reports.push_back(t); - } + // load rows + static std::string s_dpp_normal_all = "dpp.norm.ALL"; + static std::string s_dpp_abnormal_all = "dpp.abnorm.ALL"; + static std::string s_unselected_rows = "unselected.rows"; + int64_t num_rows_load_success = 0; + int64_t num_rows_load_filtered = 0; + int64_t num_rows_load_unselected = 0; + if (req.runtime_state->num_rows_load_total() > 0 || + req.runtime_state->num_rows_load_filtered() > 0 || + req.runtime_state->num_finished_range() > 0) { + params.__isset.load_counters = true; + + num_rows_load_success = req.runtime_state->num_rows_load_success(); + num_rows_load_filtered = req.runtime_state->num_rows_load_filtered(); + num_rows_load_unselected = req.runtime_state->num_rows_load_unselected(); + params.__isset.fragment_instance_reports = true; + TFragmentInstanceReport t; + t.__set_fragment_instance_id(req.runtime_state->fragment_instance_id()); + t.__set_num_finished_range(req.runtime_state->num_finished_range()); + params.fragment_instance_reports.push_back(t); + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) { + if (rs->num_rows_load_total() > 0 || rs->num_rows_load_filtered() > 0 || + req.runtime_state->num_finished_range() > 0) { + params.__isset.load_counters = true; + num_rows_load_success += rs->num_rows_load_success(); + num_rows_load_filtered += rs->num_rows_load_filtered(); + num_rows_load_unselected += rs->num_rows_load_unselected(); + params.__isset.fragment_instance_reports = true; + TFragmentInstanceReport t; + t.__set_fragment_instance_id(rs->fragment_instance_id()); + t.__set_num_finished_range(rs->num_finished_range()); + params.fragment_instance_reports.push_back(t); } } - params.load_counters.emplace(s_dpp_normal_all, std::to_string(num_rows_load_success)); - params.load_counters.emplace(s_dpp_abnormal_all, std::to_string(num_rows_load_filtered)); - params.load_counters.emplace(s_unselected_rows, std::to_string(num_rows_load_unselected)); - - if (!req.runtime_state->get_error_log_file_path().empty()) { - params.__set_tracking_url( - to_load_error_http_path(req.runtime_state->get_error_log_file_path())); - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if (!rs->get_error_log_file_path().empty()) { - params.__set_tracking_url( - to_load_error_http_path(rs->get_error_log_file_path())); - } - if (rs->wal_id() > 0) { - params.__set_txn_id(rs->wal_id()); - params.__set_label(rs->import_label()); - } + } + params.load_counters.emplace(s_dpp_normal_all, std::to_string(num_rows_load_success)); + params.load_counters.emplace(s_dpp_abnormal_all, std::to_string(num_rows_load_filtered)); + params.load_counters.emplace(s_unselected_rows, std::to_string(num_rows_load_unselected)); + + if (!req.runtime_state->get_error_log_file_path().empty()) { + params.__set_tracking_url( + to_load_error_http_path(req.runtime_state->get_error_log_file_path())); + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) 
{ + if (!rs->get_error_log_file_path().empty()) { + params.__set_tracking_url(to_load_error_http_path(rs->get_error_log_file_path())); } - } - if (!req.runtime_state->export_output_files().empty()) { - params.__isset.export_files = true; - params.export_files = req.runtime_state->export_output_files(); - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if (!rs->export_output_files().empty()) { - params.__isset.export_files = true; - params.export_files.insert(params.export_files.end(), - rs->export_output_files().begin(), - rs->export_output_files().end()); - } + if (rs->wal_id() > 0) { + params.__set_txn_id(rs->wal_id()); + params.__set_label(rs->import_label()); } } - if (!req.runtime_state->tablet_commit_infos().empty()) { - params.__isset.commitInfos = true; - params.commitInfos.reserve(req.runtime_state->tablet_commit_infos().size()); - for (auto& info : req.runtime_state->tablet_commit_infos()) { - params.commitInfos.push_back(info); - } - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if (!rs->tablet_commit_infos().empty()) { - params.__isset.commitInfos = true; - params.commitInfos.insert(params.commitInfos.end(), - rs->tablet_commit_infos().begin(), - rs->tablet_commit_infos().end()); - } + } + if (!req.runtime_state->export_output_files().empty()) { + params.__isset.export_files = true; + params.export_files = req.runtime_state->export_output_files(); + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) { + if (!rs->export_output_files().empty()) { + params.__isset.export_files = true; + params.export_files.insert(params.export_files.end(), + rs->export_output_files().begin(), + rs->export_output_files().end()); } } - if (!req.runtime_state->error_tablet_infos().empty()) { - params.__isset.errorTabletInfos = true; - params.errorTabletInfos.reserve(req.runtime_state->error_tablet_infos().size()); - for (auto& info : req.runtime_state->error_tablet_infos()) { - params.errorTabletInfos.push_back(info); + } + if (!req.runtime_state->tablet_commit_infos().empty()) { + params.__isset.commitInfos = true; + params.commitInfos.reserve(req.runtime_state->tablet_commit_infos().size()); + for (auto& info : req.runtime_state->tablet_commit_infos()) { + params.commitInfos.push_back(info); + } + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) { + if (!rs->tablet_commit_infos().empty()) { + params.__isset.commitInfos = true; + params.commitInfos.insert(params.commitInfos.end(), + rs->tablet_commit_infos().begin(), + rs->tablet_commit_infos().end()); } - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if (!rs->error_tablet_infos().empty()) { - params.__isset.errorTabletInfos = true; - params.errorTabletInfos.insert(params.errorTabletInfos.end(), - rs->error_tablet_infos().begin(), - rs->error_tablet_infos().end()); - } + } + } + if (!req.runtime_state->error_tablet_infos().empty()) { + params.__isset.errorTabletInfos = true; + params.errorTabletInfos.reserve(req.runtime_state->error_tablet_infos().size()); + for (auto& info : req.runtime_state->error_tablet_infos()) { + params.errorTabletInfos.push_back(info); + } + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) { + if (!rs->error_tablet_infos().empty()) { + params.__isset.errorTabletInfos = true; + params.errorTabletInfos.insert(params.errorTabletInfos.end(), + rs->error_tablet_infos().begin(), + rs->error_tablet_infos().end()); } } + } - if 
(!req.runtime_state->hive_partition_updates().empty()) { - params.__isset.hive_partition_updates = true; - params.hive_partition_updates.reserve( - req.runtime_state->hive_partition_updates().size()); - for (auto& hive_partition_update : req.runtime_state->hive_partition_updates()) { - params.hive_partition_updates.push_back(hive_partition_update); - } - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if (!rs->hive_partition_updates().empty()) { - params.__isset.hive_partition_updates = true; - params.hive_partition_updates.insert(params.hive_partition_updates.end(), - rs->hive_partition_updates().begin(), - rs->hive_partition_updates().end()); - } + if (!req.runtime_state->hive_partition_updates().empty()) { + params.__isset.hive_partition_updates = true; + params.hive_partition_updates.reserve(req.runtime_state->hive_partition_updates().size()); + for (auto& hive_partition_update : req.runtime_state->hive_partition_updates()) { + params.hive_partition_updates.push_back(hive_partition_update); + } + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) { + if (!rs->hive_partition_updates().empty()) { + params.__isset.hive_partition_updates = true; + params.hive_partition_updates.insert(params.hive_partition_updates.end(), + rs->hive_partition_updates().begin(), + rs->hive_partition_updates().end()); } } + } - if (!req.runtime_state->iceberg_commit_datas().empty()) { - params.__isset.iceberg_commit_datas = true; - params.iceberg_commit_datas.reserve(req.runtime_state->iceberg_commit_datas().size()); - for (auto& iceberg_commit_data : req.runtime_state->iceberg_commit_datas()) { - params.iceberg_commit_datas.push_back(iceberg_commit_data); - } - } else if (!req.runtime_states.empty()) { - for (auto* rs : req.runtime_states) { - if (!rs->iceberg_commit_datas().empty()) { - params.__isset.iceberg_commit_datas = true; - params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(), - rs->iceberg_commit_datas().begin(), - rs->iceberg_commit_datas().end()); - } + if (!req.runtime_state->iceberg_commit_datas().empty()) { + params.__isset.iceberg_commit_datas = true; + params.iceberg_commit_datas.reserve(req.runtime_state->iceberg_commit_datas().size()); + for (auto& iceberg_commit_data : req.runtime_state->iceberg_commit_datas()) { + params.iceberg_commit_datas.push_back(iceberg_commit_data); + } + } else if (!req.runtime_states.empty()) { + for (auto* rs : req.runtime_states) { + if (!rs->iceberg_commit_datas().empty()) { + params.__isset.iceberg_commit_datas = true; + params.iceberg_commit_datas.insert(params.iceberg_commit_datas.end(), + rs->iceberg_commit_datas().begin(), + rs->iceberg_commit_datas().end()); } } - - // Send new errors to coordinator - req.runtime_state->get_unreported_errors(&(params.error_log)); - params.__isset.error_log = (params.error_log.size() > 0); } + // Send new errors to coordinator + req.runtime_state->get_unreported_errors(&(params.error_log)); + params.__isset.error_log = (!params.error_log.empty()); + if (_exec_env->master_info()->__isset.backend_id) { params.__set_backend_id(_exec_env->master_info()->backend_id); } @@ -822,7 +775,8 @@ Status FragmentMgr::exec_plan_fragment(const TPipelineFragmentParams& params, { SCOPED_RAW_TIMER(&duration_ns); Status prepare_st = Status::OK(); - ASSIGN_STATUS_IF_CATCH_EXCEPTION(prepare_st = context->prepare(params), prepare_st); + ASSIGN_STATUS_IF_CATCH_EXCEPTION(prepare_st = context->prepare(params, _thread_pool.get()), + prepare_st); if (!prepare_st.ok()) { 
query_ctx->cancel(prepare_st, params.fragment_id); query_ctx->set_execution_dependency_ready(); diff --git a/be/src/runtime/load_channel.cpp b/be/src/runtime/load_channel.cpp index 99f0a0b3d5bb95..f8c11639719303 100644 --- a/be/src/runtime/load_channel.cpp +++ b/be/src/runtime/load_channel.cpp @@ -36,7 +36,8 @@ namespace doris { bvar::Adder g_loadchannel_cnt("loadchannel_cnt"); LoadChannel::LoadChannel(const UniqueId& load_id, int64_t timeout_s, bool is_high_priority, - std::string sender_ip, int64_t backend_id, bool enable_profile) + std::string sender_ip, int64_t backend_id, bool enable_profile, + int64_t wg_id) : _load_id(load_id), _timeout_s(timeout_s), _is_high_priority(is_high_priority), @@ -46,16 +47,29 @@ LoadChannel::LoadChannel(const UniqueId& load_id, int64_t timeout_s, bool is_hig std::shared_ptr query_context = ExecEnv::GetInstance()->fragment_mgr()->get_or_erase_query_ctx_with_lock( _load_id.to_thrift()); + std::shared_ptr mem_tracker = nullptr; + WorkloadGroupPtr wg_ptr = nullptr; + if (query_context != nullptr) { - _query_thread_context = {_load_id.to_thrift(), query_context->query_mem_tracker, - query_context->workload_group()}; + mem_tracker = query_context->query_mem_tracker; + wg_ptr = query_context->workload_group(); } else { - _query_thread_context = { - _load_id.to_thrift(), - MemTrackerLimiter::create_shared( - MemTrackerLimiter::Type::LOAD, - fmt::format("(FromLoadChannel)Load#Id={}", _load_id.to_string()))}; + // when memtable on sink is not enabled, load can not find queryctx + mem_tracker = MemTrackerLimiter::create_shared( + MemTrackerLimiter::Type::LOAD, + fmt::format("(FromLoadChannel)Load#Id={}", _load_id.to_string())); + if (wg_id > 0) { + WorkloadGroupPtr workload_group_ptr = + ExecEnv::GetInstance()->workload_group_mgr()->get_task_group_by_id(wg_id); + if (workload_group_ptr) { + wg_ptr = workload_group_ptr; + wg_ptr->add_mem_tracker_limiter(mem_tracker); + _need_release_memtracker = true; + } + } } + _query_thread_context = {_load_id.to_thrift(), mem_tracker, wg_ptr}; + g_loadchannel_cnt << 1; // _last_updated_time should be set before being inserted to // _load_channels in load_channel_mgr, or it may be erased @@ -71,6 +85,12 @@ LoadChannel::~LoadChannel() { rows_str << ", index id: " << entry.first << ", total_received_rows: " << entry.second.first << ", num_rows_filtered: " << entry.second.second; } + if (_need_release_memtracker) { + WorkloadGroupPtr wg_ptr = _query_thread_context.get_workload_group_ptr(); + if (wg_ptr) { + wg_ptr->remove_mem_tracker_limiter(_query_thread_context.get_memory_tracker()); + } + } LOG(INFO) << "load channel removed" << " load_id=" << _load_id << ", is high priority=" << _is_high_priority << ", sender_ip=" << _sender_ip << rows_str.str(); diff --git a/be/src/runtime/load_channel.h b/be/src/runtime/load_channel.h index 791e996574a569..6fad8c536ec4fa 100644 --- a/be/src/runtime/load_channel.h +++ b/be/src/runtime/load_channel.h @@ -46,7 +46,7 @@ class BaseTabletsChannel; class LoadChannel { public: LoadChannel(const UniqueId& load_id, int64_t timeout_s, bool is_high_priority, - std::string sender_ip, int64_t backend_id, bool enable_profile); + std::string sender_ip, int64_t backend_id, bool enable_profile, int64_t wg_id); ~LoadChannel(); // open a new load channel if not exist @@ -127,6 +127,7 @@ class LoadChannel { int64_t _backend_id; bool _enable_profile; + bool _need_release_memtracker = false; }; inline std::ostream& operator<<(std::ostream& os, LoadChannel& load_channel) { diff --git 
a/be/src/runtime/load_channel_mgr.cpp b/be/src/runtime/load_channel_mgr.cpp index d31ce1d9a7eaae..c53cade466be04 100644 --- a/be/src/runtime/load_channel_mgr.cpp +++ b/be/src/runtime/load_channel_mgr.cpp @@ -94,9 +94,13 @@ Status LoadChannelMgr::open(const PTabletWriterOpenRequest& params) { int64_t channel_timeout_s = calc_channel_timeout_s(timeout_in_req_s); bool is_high_priority = (params.has_is_high_priority() && params.is_high_priority()); + int64_t wg_id = -1; + if (params.has_workload_group_id()) { + wg_id = params.workload_group_id(); + } channel.reset(new LoadChannel(load_id, channel_timeout_s, is_high_priority, params.sender_ip(), params.backend_id(), - params.enable_profile())); + params.enable_profile(), wg_id)); _load_channels.insert({load_id, channel}); } } diff --git a/be/src/runtime/load_channel_mgr.h b/be/src/runtime/load_channel_mgr.h index c9c8f4c2a0f3cc..ec841047c951cc 100644 --- a/be/src/runtime/load_channel_mgr.h +++ b/be/src/runtime/load_channel_mgr.h @@ -64,6 +64,16 @@ class LoadChannelMgr { void stop(); + std::vector get_all_load_channel_ids() { + std::vector result; + std::lock_guard lock(_lock); + + for (auto& [id, _] : _load_channels) { + result.push_back(id.to_string()); + } + return result; + } + private: Status _get_load_channel(std::shared_ptr& channel, bool& is_eof, const UniqueId& load_id, const PTabletWriterAddBlockRequest& request); @@ -72,13 +82,12 @@ class LoadChannelMgr { Status _start_bg_worker(); - class LastSuccessChannelCache : public LRUCachePolicyTrackingManual { + class LastSuccessChannelCache : public LRUCachePolicy { public: LastSuccessChannelCache(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::LAST_SUCCESS_CHANNEL_CACHE, - capacity, LRUCacheType::SIZE, -1, - DEFAULT_LRU_CACHE_NUM_SHARDS, - DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY, false) {} + : LRUCachePolicy(CachePolicy::CacheType::LAST_SUCCESS_CHANNEL_CACHE, capacity, + LRUCacheType::SIZE, -1, DEFAULT_LRU_CACHE_NUM_SHARDS, + DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY, false) {} }; protected: diff --git a/be/src/runtime/load_stream.cpp b/be/src/runtime/load_stream.cpp index a52fa01370c41f..80cd167260c04d 100644 --- a/be/src/runtime/load_stream.cpp +++ b/be/src/runtime/load_stream.cpp @@ -293,7 +293,7 @@ Status TabletStream::close() { } DBUG_EXECUTE_IF("TabletStream.close.segment_num_mismatch", { _num_segments++; }); - if (_next_segid.load() != _num_segments) { + if (_check_num_segments && (_next_segid.load() != _num_segments)) { _status = Status::Corruption( "segment num mismatch in tablet {}, expected: {}, actual: {}, load_id: {}", _id, _num_segments, _next_segid.load(), print_id(_load_id)); @@ -380,9 +380,14 @@ void IndexStream::close(const std::vector& tablets_to_commit, auto it = _tablet_streams_map.find(tablet.tablet_id()); if (it == _tablet_streams_map.end()) { _init_tablet_stream(tablet_stream, tablet.tablet_id(), tablet.partition_id()); + } else { + tablet_stream = it->second; + } + if (tablet.has_num_segments()) { tablet_stream->add_num_segments(tablet.num_segments()); } else { - it->second->add_num_segments(tablet.num_segments()); + // for compatibility reasons (sink from old version BE) + tablet_stream->disable_num_segments_check(); } } diff --git a/be/src/runtime/load_stream.h b/be/src/runtime/load_stream.h index 427bc2dbb62cc8..3b649c688355fe 100644 --- a/be/src/runtime/load_stream.h +++ b/be/src/runtime/load_stream.h @@ -53,6 +53,7 @@ class TabletStream { Status append_data(const PStreamHeader& header, butil::IOBuf* data); Status add_segment(const 
PStreamHeader& header, butil::IOBuf* data); void add_num_segments(int64_t num_segments) { _num_segments += num_segments; } + void disable_num_segments_check() { _check_num_segments = false; } Status close(); int64_t id() const { return _id; } @@ -65,6 +66,7 @@ class TabletStream { std::unordered_map> _segids_mapping; std::atomic _next_segid; int64_t _num_segments = 0; + bool _check_num_segments = true; bthread::Mutex _lock; Status _status; PUniqueId _load_id; diff --git a/be/src/runtime/memory/global_memory_arbitrator.cpp b/be/src/runtime/memory/global_memory_arbitrator.cpp index 82b69ca02ef9f3..45d7781786f2d7 100644 --- a/be/src/runtime/memory/global_memory_arbitrator.cpp +++ b/be/src/runtime/memory/global_memory_arbitrator.cpp @@ -23,9 +23,6 @@ namespace doris { -std::mutex GlobalMemoryArbitrator::_reserved_trackers_lock; -std::unordered_map GlobalMemoryArbitrator::_reserved_trackers; - bvar::PassiveStatus g_vm_rss_sub_allocator_cache( "meminfo_vm_rss_sub_allocator_cache", [](void*) { return GlobalMemoryArbitrator::vm_rss_sub_allocator_cache(); }, nullptr); @@ -62,28 +59,11 @@ bool GlobalMemoryArbitrator::try_reserve_process_memory(int64_t bytes) { } } while (!_s_process_reserved_memory.compare_exchange_weak(old_reserved_mem, new_reserved_mem, std::memory_order_relaxed)); - { - std::lock_guard l(_reserved_trackers_lock); - _reserved_trackers[doris::thread_context()->thread_mem_tracker()->label()].add(bytes); - } return true; } void GlobalMemoryArbitrator::release_process_reserved_memory(int64_t bytes) { _s_process_reserved_memory.fetch_sub(bytes, std::memory_order_relaxed); - { - std::lock_guard l(_reserved_trackers_lock); - auto label = doris::thread_context()->thread_mem_tracker()->label(); - auto it = _reserved_trackers.find(label); - if (it == _reserved_trackers.end()) { - DCHECK(false) << "release unknown reserved memory " << label << ", bytes: " << bytes; - return; - } - _reserved_trackers[label].sub(bytes); - if (_reserved_trackers[label].current_value() == 0) { - _reserved_trackers.erase(it); - } - } } int64_t GlobalMemoryArbitrator::sub_thread_reserve_memory(int64_t bytes) { diff --git a/be/src/runtime/memory/global_memory_arbitrator.h b/be/src/runtime/memory/global_memory_arbitrator.h index f804452956786d..1859f45391fca3 100644 --- a/be/src/runtime/memory/global_memory_arbitrator.h +++ b/be/src/runtime/memory/global_memory_arbitrator.h @@ -17,7 +17,7 @@ #pragma once -#include "runtime/memory/mem_tracker.h" +#include "runtime/memory/mem_tracker_limiter.h" #include "util/mem_info.h" namespace doris { @@ -106,20 +106,6 @@ class GlobalMemoryArbitrator { static bool try_reserve_process_memory(int64_t bytes); static void release_process_reserved_memory(int64_t bytes); - static inline void make_reserved_memory_snapshots( - std::vector* snapshots) { - std::lock_guard l(_reserved_trackers_lock); - for (const auto& pair : _reserved_trackers) { - MemTracker::Snapshot snapshot; - snapshot.type = "reserved_memory"; - snapshot.label = pair.first; - snapshot.limit = -1; - snapshot.cur_consumption = pair.second.current_value(); - snapshot.peak_consumption = pair.second.peak_value(); - (*snapshots).emplace_back(snapshot); - } - } - static inline int64_t process_reserved_memory() { return _s_process_reserved_memory.load(std::memory_order_relaxed); } @@ -207,9 +193,6 @@ class GlobalMemoryArbitrator { private: static std::atomic _s_process_reserved_memory; - - static std::mutex _reserved_trackers_lock; - static std::unordered_map _reserved_trackers; }; } // namespace doris diff --git 
a/be/src/runtime/memory/lru_cache_policy.h b/be/src/runtime/memory/lru_cache_policy.h
index 419825c85c4538..7b5a8ab9fec6d9 100644
--- a/be/src/runtime/memory/lru_cache_policy.h
+++ b/be/src/runtime/memory/lru_cache_policy.h
@@ -47,6 +47,7 @@ class LRUCachePolicy : public CachePolicy {
             CHECK(ExecEnv::GetInstance()->get_dummy_lru_cache());
             _cache = ExecEnv::GetInstance()->get_dummy_lru_cache();
         }
+        _init_mem_tracker(lru_cache_type_string(lru_cache_type));
     }
 
     LRUCachePolicy(CacheType type, size_t capacity, LRUCacheType lru_cache_type,
@@ -65,6 +66,7 @@ class LRUCachePolicy : public CachePolicy {
             CHECK(ExecEnv::GetInstance()->get_dummy_lru_cache());
             _cache = ExecEnv::GetInstance()->get_dummy_lru_cache();
         }
+        _init_mem_tracker(lru_cache_type_string(lru_cache_type));
     }
 
     void reset_cache() { _cache.reset(); }
@@ -92,11 +94,33 @@ class LRUCachePolicy : public CachePolicy {
         }
     }
 
-    virtual int64_t mem_consumption() = 0;
+    std::shared_ptr<MemTrackerLimiter> mem_tracker() const {
+        DCHECK(_mem_tracker != nullptr);
+        return _mem_tracker;
+    }
 
-    virtual Cache::Handle* insert(const CacheKey& key, void* value, size_t charge,
-                                  size_t tracking_bytes,
-                                  CachePriority priority = CachePriority::NORMAL) = 0;
+    int64_t mem_consumption() {
+        DCHECK(_mem_tracker != nullptr);
+        return _mem_tracker->consumption();
+    }
+
+    // insert() consumes tracking_bytes against _mem_tracker; destroying the cache value releases them.
+    // If LRUCacheType::SIZE, tracking_bytes is usually equal to charge.
+    // If LRUCacheType::NUMBER, tracking_bytes is usually not equal to charge; in that case charge is a weight.
+    // If LRUCacheType::SIZE and tracking_bytes equals 0, memory must be tracked by the Doris Allocator,
+    // i.e. the cache value is allocated via the Allocator.
+    // If LRUCacheType::NUMBER and tracking_bytes equals 0, the memory size usually cannot be tracked accurately,
+    // so only the handle size (106 bytes) is tracked.
+    Cache::Handle* insert(const CacheKey& key, void* value, size_t charge, size_t tracking_bytes,
+                          CachePriority priority = CachePriority::NORMAL) {
+        size_t tracking_bytes_with_handle = sizeof(LRUHandle) - 1 + key.size() + tracking_bytes;
+        if (value != nullptr) {
+            mem_tracker()->consume(tracking_bytes_with_handle);
+            ((LRUCacheValueBase*)value)
+                    ->set_tracking_bytes(tracking_bytes_with_handle, _mem_tracker);
+        }
+        return _cache->insert(key, value, charge, priority);
+    }
 
     Cache::Handle* lookup(const CacheKey& key) { return _cache->lookup(key); }
 
@@ -238,128 +262,19 @@ class LRUCachePolicy : public CachePolicy {
     }
 
 protected:
+    void _init_mem_tracker(const std::string& type_name) {
+        _mem_tracker = MemTrackerLimiter::create_shared(
+                MemTrackerLimiter::Type::GLOBAL,
+                fmt::format("{}[{}]", type_string(_type), type_name));
+    }
+
     // if check_capacity failed, will return dummy lru cache,
     // compatible with ShardedLRUCache usage, but will not actually cache. 
std::shared_ptr _cache; std::mutex _lock; LRUCacheType _lru_cache_type; -}; - -class LRUCachePolicyTrackingAllocator : public LRUCachePolicy { -public: - LRUCachePolicyTrackingAllocator( - CacheType type, size_t capacity, LRUCacheType lru_cache_type, - uint32_t stale_sweep_time_s, uint32_t num_shards = DEFAULT_LRU_CACHE_NUM_SHARDS, - uint32_t element_count_capacity = DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY, - bool enable_prune = true) - : LRUCachePolicy(type, capacity, lru_cache_type, stale_sweep_time_s, num_shards, - element_count_capacity, enable_prune) { - _init_mem_tracker(lru_cache_type_string(lru_cache_type)); - } - - LRUCachePolicyTrackingAllocator(CacheType type, size_t capacity, LRUCacheType lru_cache_type, - uint32_t stale_sweep_time_s, uint32_t num_shards, - uint32_t element_count_capacity, - CacheValueTimeExtractor cache_value_time_extractor, - bool cache_value_check_timestamp, bool enable_prune = true) - : LRUCachePolicy(type, capacity, lru_cache_type, stale_sweep_time_s, num_shards, - element_count_capacity, cache_value_time_extractor, - cache_value_check_timestamp, enable_prune) { - _init_mem_tracker(lru_cache_type_string(lru_cache_type)); - } - - ~LRUCachePolicyTrackingAllocator() override { reset_cache(); } - - std::shared_ptr mem_tracker() const { - DCHECK(_mem_tracker != nullptr); - return _mem_tracker; - } - - int64_t mem_consumption() override { - DCHECK(_mem_tracker != nullptr); - return _mem_tracker->consumption(); - } - - Cache::Handle* insert(const CacheKey& key, void* value, size_t charge, size_t tracking_bytes, - CachePriority priority = CachePriority::NORMAL) override { - return _cache->insert(key, value, charge, priority); - } - -protected: - void _init_mem_tracker(const std::string& type_name) { - _mem_tracker = MemTrackerLimiter::create_shared( - MemTrackerLimiter::Type::GLOBAL, - fmt::format("{}[{}](AllocByAllocator)", type_string(_type), type_name)); - } std::shared_ptr _mem_tracker; }; -class LRUCachePolicyTrackingManual : public LRUCachePolicy { -public: - LRUCachePolicyTrackingManual( - CacheType type, size_t capacity, LRUCacheType lru_cache_type, - uint32_t stale_sweep_time_s, uint32_t num_shards = DEFAULT_LRU_CACHE_NUM_SHARDS, - uint32_t element_count_capacity = DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY, - bool enable_prune = true) - : LRUCachePolicy(type, capacity, lru_cache_type, stale_sweep_time_s, num_shards, - element_count_capacity, enable_prune) { - _init_mem_tracker(lru_cache_type_string(lru_cache_type)); - } - - LRUCachePolicyTrackingManual(CacheType type, size_t capacity, LRUCacheType lru_cache_type, - uint32_t stale_sweep_time_s, uint32_t num_shards, - uint32_t element_count_capacity, - CacheValueTimeExtractor cache_value_time_extractor, - bool cache_value_check_timestamp, bool enable_prune = true) - : LRUCachePolicy(type, capacity, lru_cache_type, stale_sweep_time_s, num_shards, - element_count_capacity, cache_value_time_extractor, - cache_value_check_timestamp, enable_prune) { - _init_mem_tracker(lru_cache_type_string(lru_cache_type)); - } - - ~LRUCachePolicyTrackingManual() override { reset_cache(); } - - MemTracker* mem_tracker() { - DCHECK(_mem_tracker != nullptr); - return _mem_tracker.get(); - } - - int64_t mem_consumption() override { - DCHECK(_mem_tracker != nullptr); - return _mem_tracker->consumption(); - } - - // Insert and cache value destroy will be manually consume tracking_bytes to mem tracker. - // If lru cache is LRUCacheType::SIZE, tracking_bytes usually equal to charge. 
- Cache::Handle* insert(const CacheKey& key, void* value, size_t charge, size_t tracking_bytes, - CachePriority priority = CachePriority::NORMAL) override { - size_t bytes_with_handle = _get_bytes_with_handle(key, charge, tracking_bytes); - if (value != nullptr) { // if tracking_bytes = 0, only tracking handle size. - mem_tracker()->consume(bytes_with_handle); - ((LRUCacheValueBase*)value)->set_tracking_bytes(bytes_with_handle, mem_tracker()); - } - return _cache->insert(key, value, charge, priority); - } - -private: - void _init_mem_tracker(const std::string& type_name) { - _mem_tracker = - std::make_unique(fmt::format("{}[{}]", type_string(_type), type_name), - ExecEnv::GetInstance()->details_mem_tracker_set()); - } - - // LRUCacheType::SIZE equal to total_size. - size_t _get_bytes_with_handle(const CacheKey& key, size_t charge, size_t bytes) { - size_t handle_size = sizeof(LRUHandle) - 1 + key.size(); - DCHECK(_lru_cache_type == LRUCacheType::SIZE || bytes != -1) - << " _type " << type_string(_type); - // if LRUCacheType::NUMBER and bytes equals 0, such as some caches cannot accurately track memory size. - // cache mem tracker value and _usage divided by handle_size(106) will get the number of cache entries. - return _lru_cache_type == LRUCacheType::SIZE ? handle_size + charge : handle_size + bytes; - } - - std::unique_ptr _mem_tracker; -}; - } // namespace doris diff --git a/be/src/runtime/memory/lru_cache_value_base.h b/be/src/runtime/memory/lru_cache_value_base.h index 6d4b2991a023a6..f9e534e6600df8 100644 --- a/be/src/runtime/memory/lru_cache_value_base.h +++ b/be/src/runtime/memory/lru_cache_value_base.h @@ -27,18 +27,19 @@ class LRUCacheValueBase { public: virtual ~LRUCacheValueBase() { if (_tracking_bytes > 0) { - _mem_tracker->consume(-_tracking_bytes); + _mem_tracker->release(_tracking_bytes); } } - void set_tracking_bytes(size_t tracking_bytes, MemTracker* mem_tracker) { + void set_tracking_bytes(size_t tracking_bytes, + const std::shared_ptr& mem_tracker) { this->_tracking_bytes = tracking_bytes; this->_mem_tracker = mem_tracker; } protected: size_t _tracking_bytes = 0; - MemTracker* _mem_tracker = nullptr; + std::shared_ptr _mem_tracker; }; } // namespace doris diff --git a/be/src/runtime/memory/mem_counter.h b/be/src/runtime/memory/mem_counter.h new file mode 100644 index 00000000000000..8964a5dc63f732 --- /dev/null +++ b/be/src/runtime/memory/mem_counter.h @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// This file is copied from +#pragma once + +#include +#include +#include + +#include "common/compiler_util.h" +#include "util/pretty_printer.h" + +namespace doris { + +/* + * A counter that keeps track of the current and peak memory usage seen. + * Relaxed ordering, not accurate in real time. 
+ * + * This class is thread-safe. +*/ +class MemCounter { +public: + MemCounter() = default; + + void add(int64_t delta) { + if (UNLIKELY(delta == 0)) { + return; + } + int64_t value = _current_value.fetch_add(delta, std::memory_order_relaxed) + delta; + update_peak(value); + } + + void add_no_update_peak(int64_t delta) { // need extreme fast + _current_value.fetch_add(delta, std::memory_order_relaxed); + } + + bool try_add(int64_t delta, int64_t max) { + if (UNLIKELY(delta == 0)) { + return true; + } + int64_t cur_val = _current_value.load(std::memory_order_relaxed); + int64_t new_val = 0; + do { + new_val = cur_val + delta; + if (UNLIKELY(new_val > max)) { + return false; + } + } while (UNLIKELY(!_current_value.compare_exchange_weak(cur_val, new_val, + std::memory_order_relaxed))); + update_peak(new_val); + return true; + } + + void sub(int64_t delta) { _current_value.fetch_sub(delta, std::memory_order_relaxed); } + + void set(int64_t v) { + _current_value.store(v, std::memory_order_relaxed); + update_peak(v); + } + + void update_peak(int64_t value) { + int64_t pre_value = _peak_value.load(std::memory_order_relaxed); + while (value > pre_value && + !_peak_value.compare_exchange_weak(pre_value, value, std::memory_order_relaxed)) { + } + } + + int64_t current_value() const { return _current_value.load(std::memory_order_relaxed); } + int64_t peak_value() const { return _peak_value.load(std::memory_order_relaxed); } + + static std::string print_bytes(int64_t bytes) { + return bytes >= 0 ? PrettyPrinter::print(bytes, TUnit::BYTES) + : "-" + PrettyPrinter::print(std::abs(bytes), TUnit::BYTES); + } + +private: + std::atomic _current_value {0}; + std::atomic _peak_value {0}; +}; + +} // namespace doris diff --git a/be/src/runtime/memory/mem_tracker.cpp b/be/src/runtime/memory/mem_tracker.cpp index f5a3853f79f84d..796e6c166e04fe 100644 --- a/be/src/runtime/memory/mem_tracker.cpp +++ b/be/src/runtime/memory/mem_tracker.cpp @@ -15,100 +15,39 @@ // specific language governing permissions and limitations // under the License. // This file is copied from -// https://github.com/apache/impala/blob/branch-2.9.0/be/src/runtime/mem-tracker.cpp -// and modified by Doris #include "runtime/memory/mem_tracker.h" -#include - -#include - -#include "bvar/bvar.h" -#include "runtime/memory/mem_tracker_limiter.h" -#include "runtime/thread_context.h" +#include namespace doris { +constexpr size_t MEM_TRACKERS_GROUP_NUM = 1000; +std::atomic mem_tracker_group_counter(0); bvar::Adder g_memtracker_cnt("memtracker_cnt"); -// Save all MemTrackers in use to maintain the weak relationship between MemTracker and MemTrackerLimiter. -// When MemTrackerLimiter prints statistics, all MemTracker statistics with weak relationship will be printed together. -// Each group corresponds to several MemTrackerLimiters and has a lock. -// Multiple groups are used to reduce the impact of locks. 
-std::vector MemTracker::mem_tracker_pool(1000); +std::vector MemTracker::mem_tracker_pool(MEM_TRACKERS_GROUP_NUM); -MemTracker::MemTracker(const std::string& label, MemTrackerLimiter* parent) : _label(label) { - _consumption = std::make_shared(); - bind_parent(parent); -} - -void MemTracker::bind_parent(MemTrackerLimiter* parent) { - if (parent) { - _type = parent->type(); - _parent_label = parent->label(); - _parent_group_num = parent->group_num(); - } else { - _type = thread_context()->thread_mem_tracker()->type(); - _parent_label = thread_context()->thread_mem_tracker()->label(); - _parent_group_num = thread_context()->thread_mem_tracker()->group_num(); - } +MemTracker::MemTracker(const std::string& label) { + _label = label; + _group_num = mem_tracker_group_counter.fetch_add(1) % MEM_TRACKERS_GROUP_NUM; { - std::lock_guard l(mem_tracker_pool[_parent_group_num].group_lock); - _tracker_group_it = mem_tracker_pool[_parent_group_num].trackers.insert( - mem_tracker_pool[_parent_group_num].trackers.end(), this); + std::lock_guard l(mem_tracker_pool[_group_num].group_lock); + _trackers_group_it = mem_tracker_pool[_group_num].trackers.insert( + mem_tracker_pool[_group_num].trackers.end(), this); } g_memtracker_cnt << 1; } MemTracker::~MemTracker() { - if (_parent_group_num != -1) { - std::lock_guard l(mem_tracker_pool[_parent_group_num].group_lock); - if (_tracker_group_it != mem_tracker_pool[_parent_group_num].trackers.end()) { - mem_tracker_pool[_parent_group_num].trackers.erase(_tracker_group_it); - _tracker_group_it = mem_tracker_pool[_parent_group_num].trackers.end(); + if (_group_num != -1) { + std::lock_guard l(mem_tracker_pool[_group_num].group_lock); + if (_trackers_group_it != mem_tracker_pool[_group_num].trackers.end()) { + mem_tracker_pool[_group_num].trackers.erase(_trackers_group_it); + _trackers_group_it = mem_tracker_pool[_group_num].trackers.end(); } g_memtracker_cnt << -1; } } -MemTracker::Snapshot MemTracker::make_snapshot() const { - Snapshot snapshot; - snapshot.type = type_string(_type); - snapshot.label = _label; - snapshot.parent_label = _parent_label; - snapshot.limit = -1; - snapshot.cur_consumption = _consumption->current_value(); - snapshot.peak_consumption = _consumption->peak_value(); - return snapshot; -} - -void MemTracker::make_group_snapshot(std::vector* snapshots, - int64_t group_num, std::string parent_label) { - std::lock_guard l(mem_tracker_pool[group_num].group_lock); - for (auto* tracker : mem_tracker_pool[group_num].trackers) { - if (tracker->parent_label() == parent_label && tracker->peak_consumption() != 0) { - snapshots->push_back(tracker->make_snapshot()); - } - } -} - -void MemTracker::make_all_trackers_snapshots(std::vector* snapshots) { - for (auto& i : mem_tracker_pool) { - std::lock_guard l(i.group_lock); - for (auto* tracker : i.trackers) { - if (tracker->peak_consumption() != 0) { - snapshots->push_back(tracker->make_snapshot()); - } - } - } -} - -std::string MemTracker::log_usage(MemTracker::Snapshot snapshot) { - return fmt::format("MemTracker Label={}, Parent Label={}, Used={}({} B), Peak={}({} B)", - snapshot.label, snapshot.parent_label, print_bytes(snapshot.cur_consumption), - snapshot.cur_consumption, print_bytes(snapshot.peak_consumption), - snapshot.peak_consumption); -} - } // namespace doris \ No newline at end of file diff --git a/be/src/runtime/memory/mem_tracker.h b/be/src/runtime/memory/mem_tracker.h index 8a977e49388d52..82b05fe544afc8 100644 --- a/be/src/runtime/memory/mem_tracker.h +++ b/be/src/runtime/memory/mem_tracker.h 
@@ -15,216 +15,59 @@ // specific language governing permissions and limitations // under the License. // This file is copied from -// https://github.com/apache/impala/blob/branch-2.9.0/be/src/runtime/mem-tracker.h -// and modified by Doris #pragma once -#include -#include - -#include -// IWYU pragma: no_include -#include // IWYU pragma: keep -#include -#include -#include +#include #include -#include -#include "common/compiler_util.h" // IWYU pragma: keep -#include "runtime/query_statistics.h" -#include "util/pretty_printer.h" +#include "runtime/memory/mem_counter.h" namespace doris { -class MemTrackerLimiter; - -// Used to track memory usage. -// -// MemTracker can be consumed manually by consume()/release(), or put into SCOPED_CONSUME_MEM_TRACKER, -// which will automatically track all memory usage of the code segment where it is located. -// -// This class is thread-safe. -class MemTracker { +/* + * can be consumed manually by consume()/release(), or put into SCOPED_CONSUME_MEM_TRACKER, + * which will automatically track all memory usage of the code segment where it is located. + * + * This class is thread-safe. +*/ +class MemTracker final { public: - struct Snapshot { - std::string type; - std::string label; - std::string parent_label; - int64_t limit = 0; - int64_t cur_consumption = 0; - int64_t peak_consumption = 0; - - bool operator<(const Snapshot& rhs) const { return cur_consumption < rhs.cur_consumption; } - }; - - struct TrackerGroup { - std::list trackers; - std::mutex group_lock; - }; - - enum class Type { - GLOBAL = 0, // Life cycle is the same as the process, e.g. Cache and default Orphan - QUERY = 1, // Count the memory consumption of all Query tasks. - LOAD = 2, // Count the memory consumption of all Load tasks. - COMPACTION = 3, // Count the memory consumption of all Base and Cumulative tasks. - SCHEMA_CHANGE = 4, // Count the memory consumption of all SchemaChange tasks. - OTHER = 5 - }; - - static std::string type_string(Type type) { - switch (type) { - case Type::GLOBAL: - return "global"; - case Type::QUERY: - return "query"; - case Type::LOAD: - return "load"; - case Type::COMPACTION: - return "compaction"; - case Type::SCHEMA_CHANGE: - return "schema_change"; - case Type::OTHER: - return "other"; - default: - LOG(FATAL) << "not match type of mem tracker limiter :" << static_cast(type); - } - LOG(FATAL) << "__builtin_unreachable"; - __builtin_unreachable(); - } - - // A counter that keeps track of the current and peak value seen. - // Relaxed ordering, not accurate in real time. 
- class MemCounter { - public: - MemCounter() : _current_value(0), _peak_value(0) {} - - void add(int64_t delta) { - int64_t value = _current_value.fetch_add(delta, std::memory_order_relaxed) + delta; - update_peak(value); - } - - void add_no_update_peak(int64_t delta) { - _current_value.fetch_add(delta, std::memory_order_relaxed); - } - - bool try_add(int64_t delta, int64_t max) { - int64_t cur_val = _current_value.load(std::memory_order_relaxed); - int64_t new_val = 0; - do { - new_val = cur_val + delta; - if (UNLIKELY(new_val > max)) { - return false; - } - } while (UNLIKELY(!_current_value.compare_exchange_weak(cur_val, new_val, - std::memory_order_relaxed))); - update_peak(new_val); - return true; - } - - void sub(int64_t delta) { _current_value.fetch_sub(delta, std::memory_order_relaxed); } - - void set(int64_t v) { - _current_value.store(v, std::memory_order_relaxed); - update_peak(v); - } - - void update_peak(int64_t value) { - int64_t pre_value = _peak_value.load(std::memory_order_relaxed); - while (value > pre_value && !_peak_value.compare_exchange_weak( - pre_value, value, std::memory_order_relaxed)) { - } - } - - int64_t current_value() const { return _current_value.load(std::memory_order_relaxed); } - int64_t peak_value() const { return _peak_value.load(std::memory_order_relaxed); } - - private: - std::atomic _current_value; - std::atomic _peak_value; - }; - - // Creates and adds the tracker to the mem_tracker_pool. - MemTracker(const std::string& label, MemTrackerLimiter* parent = nullptr); + MemTracker() = default; + MemTracker(const std::string& label); + ~MemTracker(); - virtual ~MemTracker(); + void consume(int64_t bytes) { _mem_counter.add(bytes); } + void consume_no_update_peak(int64_t bytes) { _mem_counter.add_no_update_peak(bytes); } + void release(int64_t bytes) { _mem_counter.sub(bytes); } + void set_consumption(int64_t bytes) { _mem_counter.set(bytes); } + int64_t consumption() const { return _mem_counter.current_value(); } + int64_t peak_consumption() const { return _mem_counter.peak_value(); } - static std::string print_bytes(int64_t bytes) { - return bytes >= 0 ? PrettyPrinter::print(bytes, TUnit::BYTES) - : "-" + PrettyPrinter::print(std::abs(bytes), TUnit::BYTES); - } - -public: - Type type() const { return _type; } const std::string& label() const { return _label; } - const std::string& parent_label() const { return _parent_label; } - const std::string& set_parent_label() const { return _parent_label; } - // Returns the memory consumed in bytes. - int64_t consumption() const { return _consumption->current_value(); } - int64_t peak_consumption() const { return _consumption->peak_value(); } - - void consume(int64_t bytes) { - if (UNLIKELY(bytes == 0)) { - return; - } - _consumption->add(bytes); - if (_query_statistics) { - _query_statistics->set_max_peak_memory_bytes(_consumption->peak_value()); - _query_statistics->set_current_used_memory_bytes(_consumption->current_value()); - } - } - - void consume_no_update_peak(int64_t bytes) { // need extreme fast - _consumption->add_no_update_peak(bytes); - } - - void release(int64_t bytes) { _consumption->sub(bytes); } - - void set_consumption(int64_t bytes) { _consumption->set(bytes); } - - std::shared_ptr get_query_statistics() { return _query_statistics; } - -public: - virtual Snapshot make_snapshot() const; - // Specify group_num from mem_tracker_pool to generate snapshot. 
- static void make_group_snapshot(std::vector* snapshots, int64_t group_num, - std::string parent_label); - static void make_all_trackers_snapshots(std::vector* snapshots); - static std::string log_usage(MemTracker::Snapshot snapshot); - - virtual std::string debug_string() { - std::stringstream msg; - msg << "label: " << _label << "; " - << "consumption: " << consumption() << "; " - << "peak_consumption: " << peak_consumption() << "; "; - return msg.str(); + std::string log_usage() const { + return fmt::format("MemTracker name={}, Used={}({} B), Peak={}({} B)", _label, + MemCounter::print_bytes(consumption()), consumption(), + MemCounter::print_bytes(peak_consumption()), peak_consumption()); } -protected: - // Only used by MemTrackerLimiter - MemTracker() { _parent_group_num = -1; } - - void bind_parent(MemTrackerLimiter* parent); - - Type _type; - - // label used in the make snapshot, not guaranteed unique. - std::string _label; - - std::shared_ptr _consumption = nullptr; - - // Tracker is located in group num in mem_tracker_pool - int64_t _parent_group_num = 0; - - // Use _parent_label to correlate with parent limiter tracker. - std::string _parent_label = "-"; - - static std::vector mem_tracker_pool; +private: + MemCounter _mem_counter; + std::string _label {"None"}; + /* + * Save all MemTrackers, used by dump memory info. + */ + struct TrackersGroup { + std::list trackers; + std::mutex group_lock; + }; + // Each group corresponds to several MemCountes and has a lock. + // Multiple groups are used to reduce the impact of locks. + static std::vector mem_tracker_pool; + // Group number in mem_tracker_pool, generated by the timestamp. + int64_t _group_num {-1}; // Iterator into mem_tracker_pool for this object. Stored to have O(1) remove. - std::list::iterator _tracker_group_it; - - std::shared_ptr _query_statistics = nullptr; + std::list::iterator _trackers_group_it; }; } // namespace doris diff --git a/be/src/runtime/memory/mem_tracker_limiter.cpp b/be/src/runtime/memory/mem_tracker_limiter.cpp index 85cceb365c8bd2..78e66b6a579b79 100644 --- a/be/src/runtime/memory/mem_tracker_limiter.cpp +++ b/be/src/runtime/memory/mem_tracker_limiter.cpp @@ -19,16 +19,13 @@ #include #include -#include #include #include #include #include -#include "bvar/bvar.h" #include "common/config.h" -#include "olap/memtable_memory_limiter.h" #include "runtime/exec_env.h" #include "runtime/fragment_mgr.h" #include "runtime/memory/global_memory_arbitrator.h" @@ -37,9 +34,7 @@ #include "service/backend_options.h" #include "util/mem_info.h" #include "util/perf_counters.h" -#include "util/pretty_printer.h" #include "util/runtime_profile.h" -#include "util/stack_util.h" namespace doris { @@ -54,6 +49,7 @@ static bvar::Adder memory_schema_change_trackers_sum_bytes( "memory_schema_change_trackers_sum_bytes"); static bvar::Adder memory_other_trackers_sum_bytes("memory_other_trackers_sum_bytes"); +std::atomic mem_tracker_limiter_group_counter(0); constexpr auto GC_MAX_SEEK_TRACKER = 1000; std::atomic MemTrackerLimiter::_enable_print_log_process_usage {true}; @@ -76,14 +72,14 @@ static RuntimeProfile::Counter* previously_canceling_tasks_counter = MemTrackerLimiter::MemTrackerLimiter(Type type, const std::string& label, int64_t byte_limit) { DCHECK_GE(byte_limit, -1); - _consumption = std::make_shared(); _type = type; _label = label; _limit = byte_limit; if (_type == Type::GLOBAL) { _group_num = 0; } else { - _group_num = random() % 999 + 1; + _group_num = + mem_tracker_limiter_group_counter.fetch_add(1) % 
(MEM_TRACKER_GROUP_NUM - 1) + 1; } // currently only select/load need runtime query statistics @@ -108,6 +104,12 @@ std::shared_ptr MemTrackerLimiter::create_shared(MemTrackerLi return tracker; } +bool MemTrackerLimiter::open_memory_tracker_inaccurate_detect() { + return doris::config::crash_in_memory_tracker_inaccurate && + (_type == Type::COMPACTION || _type == Type::SCHEMA_CHANGE || _type == Type::QUERY || + (_type == Type::LOAD && !is_group_commit_load)); +} + MemTrackerLimiter::~MemTrackerLimiter() { consume(_untracked_mem); static std::string mem_tracker_inaccurate_msg = @@ -126,36 +128,28 @@ MemTrackerLimiter::~MemTrackerLimiter() { "tracker web or log, this indicates that there may be a memory leak. " "4. If you need to " "transfer memory tracking value between two trackers, can use transfer_to."; - if (_consumption->current_value() != 0) { -// TODO, expect mem tracker equal to 0 at the load/compaction/etc. task end. -#ifndef NDEBUG - if (_type == Type::COMPACTION || _type == Type::SCHEMA_CHANGE || _type == Type::QUERY || - (_type == Type::LOAD && !is_group_commit_load)) { - std::string err_msg = - fmt::format("mem tracker label: {}, consumption: {}, peak consumption: {}, {}.", - label(), _consumption->current_value(), _consumption->peak_value(), - mem_tracker_inaccurate_msg); + if (consumption() != 0) { + if (open_memory_tracker_inaccurate_detect()) { + std::string err_msg = fmt::format( + "mem tracker label: {}, consumption: {}, peak consumption: {}, {}.", label(), + consumption(), peak_consumption(), mem_tracker_inaccurate_msg); LOG(FATAL) << err_msg << print_address_sanitizers(); } -#endif if (ExecEnv::tracking_memory()) { - ExecEnv::GetInstance()->orphan_mem_tracker()->consume(_consumption->current_value()); + ExecEnv::GetInstance()->orphan_mem_tracker()->consume(consumption()); } - _consumption->set(0); -#ifndef NDEBUG - } else if (!_address_sanitizers.empty() && !is_group_commit_load) { + _mem_counter.set(0); + } else if (open_memory_tracker_inaccurate_detect() && !_address_sanitizers.empty()) { LOG(FATAL) << "[Address Sanitizer] consumption is 0, but address sanitizers not empty. " << ", mem tracker label: " << _label - << ", peak consumption: " << _consumption->peak_value() - << print_address_sanitizers(); -#endif + << ", peak consumption: " << peak_consumption() << print_address_sanitizers(); } + DCHECK(reserved_consumption() == 0); memory_memtrackerlimiter_cnt << -1; } -#ifndef NDEBUG void MemTrackerLimiter::add_address_sanitizers(void* buf, size_t size) { - if (_type == Type::QUERY || (_type == Type::LOAD && !is_group_commit_load)) { + if (open_memory_tracker_inaccurate_detect()) { std::lock_guard l(_address_sanitizers_mtx); auto it = _address_sanitizers.find(buf); if (it != _address_sanitizers.end()) { @@ -163,9 +157,9 @@ void MemTrackerLimiter::add_address_sanitizers(void* buf, size_t size) { fmt::format("[Address Sanitizer] memory buf repeat add, mem tracker label: {}, " "consumption: {}, peak consumption: {}, buf: {}, size: {}, old " "buf: {}, old size: {}, new stack_trace: {}, old stack_trace: {}.", - _label, _consumption->current_value(), _consumption->peak_value(), - buf, size, it->first, it->second.size, - get_stack_trace(1, "FULL_WITH_INLINE"), it->second.stack_trace)); + _label, consumption(), peak_consumption(), buf, size, it->first, + it->second.size, get_stack_trace(1, "FULL_WITH_INLINE"), + it->second.stack_trace)); } // if alignment not equal to 0, maybe usable_size > size. 
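For illustration only: the add_address_sanitizers/remove_address_sanitizers hunks above pair every tracked allocation with a later release and report when a buffer is registered twice, freed with a different size, or freed without ever being registered. A minimal standalone sketch of that bookkeeping idea follows; the AddressBook name, the std::cerr reporting, and the omission of stack traces and of the open_memory_tracker_inaccurate_detect() gate are simplifications for this sketch, not part of the patch.

#include <cstddef>
#include <iostream>
#include <mutex>
#include <unordered_map>

// Simplified stand-in for the per-tracker _address_sanitizers map.
class AddressBook {
public:
    void on_alloc(void* buf, size_t size) {
        std::lock_guard<std::mutex> l(_mtx);
        auto it = _entries.find(buf);
        if (it != _entries.end()) {
            std::cerr << "buf registered twice: " << buf << ", old size " << it->second
                      << ", new size " << size << '\n';
        }
        _entries[buf] = size;
    }

    void on_free(void* buf, size_t size) {
        std::lock_guard<std::mutex> l(_mtx);
        auto it = _entries.find(buf);
        if (it == _entries.end()) {
            std::cerr << "freeing unknown buf: " << buf << '\n';
            return;
        }
        if (it->second != size) {
            std::cerr << "size mismatch for " << buf << ": tracked " << it->second
                      << ", freed " << size << '\n';
        }
        _entries.erase(it);
    }

    // Anything still present here at shutdown was never released.
    size_t outstanding() const {
        std::lock_guard<std::mutex> l(_mtx);
        return _entries.size();
    }

private:
    mutable std::mutex _mtx;
    std::unordered_map<void*, size_t> _entries;
};

int main() {
    AddressBook book;
    int x = 0;
    book.on_alloc(&x, sizeof(x));
    book.on_free(&x, sizeof(x) + 1); // reports a size mismatch, then forgets the buffer
    std::cout << "outstanding buffers: " << book.outstanding() << '\n';
    return 0;
}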
@@ -177,7 +171,7 @@ void MemTrackerLimiter::add_address_sanitizers(void* buf, size_t size) { } void MemTrackerLimiter::remove_address_sanitizers(void* buf, size_t size) { - if (_type == Type::QUERY || (_type == Type::LOAD && !is_group_commit_load)) { + if (open_memory_tracker_inaccurate_detect()) { std::lock_guard l(_address_sanitizers_mtx); auto it = _address_sanitizers.find(buf); if (it != _address_sanitizers.end()) { @@ -186,8 +180,8 @@ void MemTrackerLimiter::remove_address_sanitizers(void* buf, size_t size) { "[Address Sanitizer] free memory buf size inaccurate, mem tracker label: " "{}, consumption: {}, peak consumption: {}, buf: {}, size: {}, old buf: " "{}, old size: {}, new stack_trace: {}, old stack_trace: {}.", - _label, _consumption->current_value(), _consumption->peak_value(), buf, - size, it->first, it->second.size, get_stack_trace(1, "FULL_WITH_INLINE"), + _label, consumption(), peak_consumption(), buf, size, it->first, + it->second.size, get_stack_trace(1, "FULL_WITH_INLINE"), it->second.stack_trace)); } _address_sanitizers.erase(buf); @@ -195,7 +189,7 @@ void MemTrackerLimiter::remove_address_sanitizers(void* buf, size_t size) { _error_address_sanitizers.emplace_back(fmt::format( "[Address Sanitizer] memory buf not exist, mem tracker label: {}, consumption: " "{}, peak consumption: {}, buf: {}, size: {}, stack_trace: {}.", - _label, _consumption->current_value(), _consumption->peak_value(), buf, size, + _label, consumption(), peak_consumption(), buf, size, get_stack_trace(1, "FULL_WITH_INLINE"))); } } @@ -209,8 +203,8 @@ std::string MemTrackerLimiter::print_address_sanitizers() { auto msg = fmt::format( "\n [Address Sanitizer] buf not be freed, mem tracker label: {}, consumption: " "{}, peak consumption: {}, buf: {}, size {}, strack trace: {}", - _label, _consumption->current_value(), _consumption->peak_value(), it.first, - it.second.size, it.second.stack_trace); + _label, consumption(), peak_consumption(), it.first, it.second.size, + it.second.stack_trace); LOG(INFO) << msg; detail += msg; } @@ -221,18 +215,39 @@ std::string MemTrackerLimiter::print_address_sanitizers() { } return detail; } -#endif -MemTracker::Snapshot MemTrackerLimiter::make_snapshot() const { +MemTrackerLimiter::Snapshot MemTrackerLimiter::make_snapshot() const { Snapshot snapshot; snapshot.type = type_string(_type); snapshot.label = _label; snapshot.limit = _limit; - snapshot.cur_consumption = _consumption->current_value(); - snapshot.peak_consumption = _consumption->peak_value(); + snapshot.cur_consumption = consumption(); + snapshot.peak_consumption = peak_consumption(); return snapshot; } +MemTrackerLimiter::Snapshot MemTrackerLimiter::make_reserved_trackers_snapshot() const { + Snapshot snapshot; + snapshot.type = "reserved_memory"; + snapshot.label = _label; + snapshot.limit = -1; + snapshot.cur_consumption = reserved_consumption(); + snapshot.peak_consumption = reserved_peak_consumption(); + return snapshot; +} + +void MemTrackerLimiter::make_all_reserved_trackers_snapshots(std::vector* snapshots) { + for (auto& i : ExecEnv::GetInstance()->mem_tracker_limiter_pool) { + std::lock_guard l(i.group_lock); + for (auto trackerWptr : i.trackers) { + auto tracker = trackerWptr.lock(); + if (tracker != nullptr && tracker->reserved_consumption() != 0) { + (*snapshots).emplace_back(tracker->make_reserved_trackers_snapshot()); + } + } + } +} + void MemTrackerLimiter::refresh_global_counter() { std::unordered_map type_mem_sum = { {Type::GLOBAL, 0}, {Type::QUERY, 0}, {Type::LOAD, 0}, @@ -249,7 +264,8 @@ void 
MemTrackerLimiter::refresh_global_counter() { } int64_t all_trackers_mem_sum = 0; for (auto it : type_mem_sum) { - MemTrackerLimiter::TypeMemSum[it.first]->set(it.second); + MemTrackerLimiter::TypeMemSum[it.first].set(it.second); + all_trackers_mem_sum += it.second; switch (it.first) { case Type::GLOBAL: @@ -301,18 +317,18 @@ void MemTrackerLimiter::clean_tracker_limiter_group() { #endif } -void MemTrackerLimiter::make_process_snapshots(std::vector* snapshots) { +void MemTrackerLimiter::make_process_snapshots(std::vector* snapshots) { MemTrackerLimiter::refresh_global_counter(); int64_t all_trackers_mem_sum = 0; Snapshot snapshot; - for (auto it : MemTrackerLimiter::TypeMemSum) { + for (const auto& it : MemTrackerLimiter::TypeMemSum) { snapshot.type = "overview"; snapshot.label = type_string(it.first); snapshot.limit = -1; - snapshot.cur_consumption = it.second->current_value(); - snapshot.peak_consumption = it.second->peak_value(); + snapshot.cur_consumption = it.second.current_value(); + snapshot.peak_consumption = it.second.peak_value(); (*snapshots).emplace_back(snapshot); - all_trackers_mem_sum += it.second->current_value(); + all_trackers_mem_sum += it.second.current_value(); } snapshot.type = "overview"; @@ -364,7 +380,7 @@ void MemTrackerLimiter::make_process_snapshots(std::vector (*snapshots).emplace_back(snapshot); } -void MemTrackerLimiter::make_type_snapshots(std::vector* snapshots, +void MemTrackerLimiter::make_type_snapshots(std::vector* snapshots, MemTrackerLimiter::Type type) { if (type == Type::GLOBAL) { std::lock_guard l( @@ -373,7 +389,6 @@ void MemTrackerLimiter::make_type_snapshots(std::vector* s auto tracker = trackerWptr.lock(); if (tracker != nullptr) { (*snapshots).emplace_back(tracker->make_snapshot()); - MemTracker::make_group_snapshot(snapshots, tracker->group_num(), tracker->label()); } } } else { @@ -384,17 +399,15 @@ void MemTrackerLimiter::make_type_snapshots(std::vector* s auto tracker = trackerWptr.lock(); if (tracker != nullptr && tracker->type() == type) { (*snapshots).emplace_back(tracker->make_snapshot()); - MemTracker::make_group_snapshot(snapshots, tracker->group_num(), - tracker->label()); } } } } } -void MemTrackerLimiter::make_top_consumption_snapshots(std::vector* snapshots, +void MemTrackerLimiter::make_top_consumption_snapshots(std::vector* snapshots, int top_num) { - std::priority_queue max_pq; + std::priority_queue max_pq; // not include global type. 
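For illustration, the top-consumption report above boils down to a max-heap ordered by Snapshot::cur_consumption (the operator< declared on Snapshot). A minimal standalone sketch of that selection pattern follows; the struct below is a simplified stand-in, not the real MemTrackerLimiter::Snapshot.

#include <cstdint>
#include <iostream>
#include <queue>
#include <string>
#include <vector>

// Simplified stand-in for a tracker snapshot; only the fields the ordering needs.
struct Snapshot {
    std::string label;
    int64_t cur_consumption = 0;
    bool operator<(const Snapshot& rhs) const { return cur_consumption < rhs.cur_consumption; }
};

// Return the top_num snapshots with the largest cur_consumption.
std::vector<Snapshot> top_consumers(const std::vector<Snapshot>& all, int top_num) {
    std::priority_queue<Snapshot> max_pq;  // max-heap: largest consumption on top
    for (const auto& s : all) {
        max_pq.push(s);
    }
    std::vector<Snapshot> result;
    while (!max_pq.empty() && top_num-- > 0) {
        result.push_back(max_pq.top());
        max_pq.pop();
    }
    return result;
}

int main() {
    std::vector<Snapshot> all = {{"query#1", 100}, {"load#2", 300}, {"query#3", 200}};
    for (const auto& s : top_consumers(all, 2)) {
        std::cout << s.label << " " << s.cur_consumption << "\n";  // load#2 300, query#3 200
    }
    return 0;
}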
for (unsigned i = 1; i < ExecEnv::GetInstance()->mem_tracker_limiter_pool.size(); ++i) { std::lock_guard l( @@ -414,7 +427,7 @@ void MemTrackerLimiter::make_top_consumption_snapshots(std::vector* snapshots) { +void MemTrackerLimiter::make_all_trackers_snapshots(std::vector* snapshots) { for (auto& i : ExecEnv::GetInstance()->mem_tracker_limiter_pool) { std::lock_guard l(i.group_lock); for (auto trackerWptr : i.trackers) { @@ -426,25 +439,25 @@ void MemTrackerLimiter::make_all_trackers_snapshots(std::vector* snapshots) { +void MemTrackerLimiter::make_all_memory_state_snapshots(std::vector* snapshots) { make_process_snapshots(snapshots); make_all_trackers_snapshots(snapshots); - MemTracker::make_all_trackers_snapshots(snapshots); + make_all_reserved_trackers_snapshots(snapshots); } -std::string MemTrackerLimiter::log_usage(MemTracker::Snapshot snapshot) { - return fmt::format( - "MemTrackerLimiter Label={}, Type={}, Limit={}({} B), Used={}({} B), Peak={}({} B)", - snapshot.label, snapshot.type, print_bytes(snapshot.limit), snapshot.limit, - print_bytes(snapshot.cur_consumption), snapshot.cur_consumption, - print_bytes(snapshot.peak_consumption), snapshot.peak_consumption); +std::string MemTrackerLimiter::log_usage(Snapshot snapshot) { + return fmt::format("MemTracker Label={}, Type={}, Limit={}({} B), Used={}({} B), Peak={}({} B)", + snapshot.label, snapshot.type, MemCounter::print_bytes(snapshot.limit), + snapshot.limit, MemCounter::print_bytes(snapshot.cur_consumption), + snapshot.cur_consumption, MemCounter::print_bytes(snapshot.peak_consumption), + snapshot.peak_consumption); } -std::string MemTrackerLimiter::type_log_usage(MemTracker::Snapshot snapshot) { +std::string MemTrackerLimiter::type_log_usage(Snapshot snapshot) { return fmt::format("Type={}, Used={}({} B), Peak={}({} B)", snapshot.type, - print_bytes(snapshot.cur_consumption), snapshot.cur_consumption, - print_bytes(snapshot.peak_consumption), snapshot.peak_consumption); + MemCounter::print_bytes(snapshot.cur_consumption), snapshot.cur_consumption, + MemCounter::print_bytes(snapshot.peak_consumption), + snapshot.peak_consumption); } std::string MemTrackerLimiter::type_detail_usage(const std::string& msg, Type type) { @@ -468,16 +481,6 @@ void MemTrackerLimiter::print_log_usage(const std::string& msg) { std::string detail = msg; detail += "\nProcess Memory Summary:\n " + GlobalMemoryArbitrator::process_mem_log_str(); detail += "\nMemory Tracker Summary: " + log_usage(); - std::string child_trackers_usage; - std::vector snapshots; - MemTracker::make_group_snapshot(&snapshots, _group_num, _label); - for (const auto& snapshot : snapshots) { - child_trackers_usage += "\n " + MemTracker::log_usage(snapshot); - } - if (!child_trackers_usage.empty()) { - detail += child_trackers_usage; - } - LOG(WARNING) << detail; } } @@ -485,25 +488,24 @@ void MemTrackerLimiter::print_log_usage(const std::string& msg) { std::string MemTrackerLimiter::log_process_usage_str() { std::string detail; detail += "\nProcess Memory Summary:\n " + GlobalMemoryArbitrator::process_mem_log_str(); - std::vector snapshots; + std::vector snapshots; MemTrackerLimiter::make_process_snapshots(&snapshots); MemTrackerLimiter::make_type_snapshots(&snapshots, MemTrackerLimiter::Type::GLOBAL); MemTrackerLimiter::make_top_consumption_snapshots(&snapshots, 15); - - // Add additional tracker printed when memory exceeds limit. 
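The snapshot collectors above walk every tracker group under its group_lock and skip trackers whose weak_ptr has already expired. A rough sketch of that iteration pattern, with Tracker and TrackerGroup as simplified stand-ins for the real pool types:

#include <cstdint>
#include <memory>
#include <mutex>
#include <vector>

struct Tracker {
    int64_t reserved = 0;
};

struct TrackerGroup {
    std::mutex group_lock;
    std::vector<std::weak_ptr<Tracker>> trackers;
};

// Collect the still-alive trackers that currently hold a non-zero reservation.
std::vector<std::shared_ptr<Tracker>> collect_reserved(std::vector<TrackerGroup>& pool) {
    std::vector<std::shared_ptr<Tracker>> out;
    for (auto& group : pool) {
        std::lock_guard<std::mutex> l(group.group_lock);  // guard concurrent register/deregister
        for (auto& wptr : group.trackers) {
            auto tracker = wptr.lock();  // expired entries yield nullptr and are skipped
            if (tracker != nullptr && tracker->reserved != 0) {
                out.push_back(tracker);
            }
        }
    }
    return out;
}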
- snapshots.emplace_back( - ExecEnv::GetInstance()->memtable_memory_limiter()->mem_tracker()->make_snapshot()); + MemTrackerLimiter::make_all_reserved_trackers_snapshots(&snapshots); detail += "\nMemory Tracker Summary:"; for (const auto& snapshot : snapshots) { - if (snapshot.label.empty() && snapshot.parent_label.empty()) { + if (snapshot.label.empty()) { detail += "\n " + MemTrackerLimiter::type_log_usage(snapshot); - } else if (snapshot.parent_label.empty()) { - detail += "\n " + MemTrackerLimiter::log_usage(snapshot); } else { - detail += "\n " + MemTracker::log_usage(snapshot); + detail += "\n " + MemTrackerLimiter::log_usage(snapshot); } } + + // Add additional tracker printed when memory exceeds limit. + detail += "\n " + + ExecEnv::GetInstance()->memtable_memory_limiter()->mem_tracker()->log_usage(); return detail; } @@ -519,8 +521,8 @@ std::string MemTrackerLimiter::tracker_limit_exceeded_str() { std::string err_msg = fmt::format( "memory tracker limit exceeded, tracker label:{}, type:{}, limit " "{}, peak used {}, current used {}. backend {}, {}.", - label(), type_string(_type), print_bytes(limit()), - print_bytes(_consumption->peak_value()), print_bytes(_consumption->current_value()), + label(), type_string(_type), MemCounter::print_bytes(limit()), + MemCounter::print_bytes(peak_consumption()), MemCounter::print_bytes(consumption()), BackendOptions::get_localhost(), GlobalMemoryArbitrator::process_memory_used_str()); if (_type == Type::QUERY || _type == Type::LOAD) { err_msg += fmt::format( @@ -545,7 +547,7 @@ int64_t MemTrackerLimiter::free_top_memory_query(int64_t min_free_mem, "Process memory not enough, cancel top memory used {}: " "<{}> consumption {}, backend {}, {}. Execute again " "after enough memory, details see be.INFO.", - type_string(type), label, print_bytes(mem_consumption), + type_string(type), label, MemCounter::print_bytes(mem_consumption), BackendOptions::get_localhost(), cancel_reason); }, profile, GCType::PROCESS); @@ -666,7 +668,7 @@ int64_t MemTrackerLimiter::free_top_overcommit_query(int64_t min_free_mem, "Process memory not enough, cancel top memory overcommit {}: " "<{}> consumption {}, backend {}, {}. Execute again " "after enough memory, details see be.INFO.", - type_string(type), label, print_bytes(mem_consumption), + type_string(type), label, MemCounter::print_bytes(mem_consumption), BackendOptions::get_localhost(), cancel_reason); }, profile, GCType::PROCESS); diff --git a/be/src/runtime/memory/mem_tracker_limiter.h b/be/src/runtime/memory/mem_tracker_limiter.h index 344f3dc92b6670..faf354cca4cbf3 100644 --- a/be/src/runtime/memory/mem_tracker_limiter.h +++ b/be/src/runtime/memory/mem_tracker_limiter.h @@ -20,28 +20,29 @@ #include #include #include -#include #include +#include // IWYU pragma: no_include #include // IWYU pragma: keep #include #include #include -#include #include #include #include #include "common/config.h" #include "common/status.h" -#include "runtime/memory/mem_tracker.h" +#include "runtime/memory/mem_counter.h" +#include "runtime/query_statistics.h" #include "util/string_util.h" #include "util/uid_util.h" namespace doris { class RuntimeProfile; +class MemTrackerLimiter; constexpr size_t MEM_TRACKER_GROUP_NUM = 1000; @@ -58,78 +59,115 @@ struct TrackerLimiterGroup { std::mutex group_lock; }; -// Track and limit the memory usage of process and query. -// Contains an limit, arranged into a tree structure. -// -// Automatically track every once malloc/free of the system memory allocator (Currently, based on TCMlloc hook). 
-// Put Query MemTrackerLimiter into SCOPED_ATTACH_TASK when the thread starts,all memory used by this thread -// will be recorded on this Query, otherwise it will be recorded in Orphan Tracker by default. -class MemTrackerLimiter final : public MemTracker { +/* + * Track and limit the memory usage of process and query. + * + * Usually, put Query MemTrackerLimiter into SCOPED_ATTACH_TASK when the thread starts, + * all memory used by this thread will be recorded on this Query. + * + * This class is thread-safe. +*/ +class MemTrackerLimiter final { public: + /* + * Part 1, Type definition + */ + // TODO There are more and more GC codes and there should be a separate manager class. enum class GCType { PROCESS = 0, WORK_LOAD_GROUP = 1 }; - inline static std::unordered_map> TypeMemSum = { - {Type::GLOBAL, std::make_shared()}, - {Type::QUERY, std::make_shared()}, - {Type::LOAD, std::make_shared()}, - {Type::COMPACTION, std::make_shared()}, - {Type::SCHEMA_CHANGE, std::make_shared()}, - {Type::OTHER, std::make_shared()}}; + enum class Type { + GLOBAL = 0, // Life cycle is the same as the process, e.g. Cache and default Orphan + QUERY = 1, // Count the memory consumption of all Query tasks. + LOAD = 2, // Count the memory consumption of all Load tasks. + COMPACTION = 3, // Count the memory consumption of all Base and Cumulative tasks. + SCHEMA_CHANGE = 4, // Count the memory consumption of all SchemaChange tasks. + OTHER = 5, + }; + + struct Snapshot { + std::string type; + std::string label; + int64_t limit = 0; + int64_t cur_consumption = 0; + int64_t peak_consumption = 0; + + bool operator<(const Snapshot& rhs) const { return cur_consumption < rhs.cur_consumption; } + }; + + // Corresponding to MemTrackerLimiter::Type. + // MemCounter contains atomic variables, which are not allowed to be copied or moved. + inline static std::unordered_map TypeMemSum; + + /* + * Part 2, Constructors and property methods + */ -public: static std::shared_ptr create_shared( MemTrackerLimiter::Type type, const std::string& label = std::string(), int64_t byte_limit = -1); // byte_limit equal to -1 means no consumption limit, only participate in process memory statistics. MemTrackerLimiter(Type type, const std::string& label, int64_t byte_limit); - ~MemTrackerLimiter() override; - - static std::string gc_type_string(GCType type) { - switch (type) { - case GCType::PROCESS: - return "process"; - case GCType::WORK_LOAD_GROUP: - return "work load group"; - default: - LOG(FATAL) << "not match gc type:" << static_cast(type); - } - LOG(FATAL) << "__builtin_unreachable"; - __builtin_unreachable(); - } + ~MemTrackerLimiter(); - void set_consumption() { LOG(FATAL) << "MemTrackerLimiter set_consumption not supported"; } + Type type() const { return _type; } + const std::string& label() const { return _label; } + std::shared_ptr get_query_statistics() { return _query_statistics; } int64_t group_num() const { return _group_num; } bool has_limit() const { return _limit >= 0; } int64_t limit() const { return _limit; } bool limit_exceeded() const { return _limit >= 0 && _limit < consumption(); } + Status check_limit(int64_t bytes = 0); + bool is_overcommit_tracker() const { return type() == Type::QUERY || type() == Type::LOAD; } + bool is_query_cancelled() { return _is_query_cancelled; } + void set_is_query_cancelled(bool is_cancelled) { _is_query_cancelled.store(is_cancelled); } - bool try_consume(int64_t bytes) const { + // Iterator into mem_tracker_limiter_pool for this object. Stored to have O(1) remove. 
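wg_tracker_limiter_group_it stores the tracker's own position in its workload-group list so that deregistration is a constant-time erase. A generic sketch of that idiom, with illustrative names rather than the Doris types:

#include <list>
#include <memory>

struct Item {};

struct Registry {
    std::list<std::weak_ptr<Item>> items;

    // Registration returns the iterator; the caller keeps it for O(1) removal later.
    std::list<std::weak_ptr<Item>>::iterator add(const std::shared_ptr<Item>& item) {
        return items.insert(items.end(), item);
    }

    // std::list::erase by iterator is O(1) and leaves other iterators valid.
    void remove(std::list<std::weak_ptr<Item>>::iterator it) { items.erase(it); }
};

int main() {
    Registry reg;
    auto item = std::make_shared<Item>();
    auto it = reg.add(item);  // store the iterator alongside the object
    reg.remove(it);           // constant time, no scan of the list
    return 0;
}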
+ std::list>::iterator wg_tracker_limiter_group_it; + + /* + * Part 3, Memory tracking method (use carefully!) + * + * Note: Only memory not allocated by Doris Allocator can be tracked by manually calling consume() and release(). + * Memory allocated by Doris Allocator needs to be tracked using SCOPED_ATTACH_TASK or + * SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER. + */ + + int64_t consumption() const { return _mem_counter.current_value(); } + int64_t peak_consumption() const { return _mem_counter.peak_value(); } + + void consume(int64_t bytes) { + _mem_counter.add(bytes); + if (_query_statistics) { + _query_statistics->set_max_peak_memory_bytes(peak_consumption()); + _query_statistics->set_current_used_memory_bytes(consumption()); + } + } + + void consume_no_update_peak(int64_t bytes) { _mem_counter.add_no_update_peak(bytes); } + + void release(int64_t bytes) { _mem_counter.sub(bytes); } + + bool try_consume(int64_t bytes) { if (UNLIKELY(bytes == 0)) { return true; } - bool st = true; + bool rt = true; if (is_overcommit_tracker() && !config::enable_query_memory_overcommit) { - st = _consumption->try_add(bytes, _limit); + rt = _mem_counter.try_add(bytes, _limit); } else { - _consumption->add(bytes); + _mem_counter.add(bytes); } - if (st && _query_statistics) { - _query_statistics->set_max_peak_memory_bytes(_consumption->peak_value()); - _query_statistics->set_current_used_memory_bytes(_consumption->current_value()); + if (rt && _query_statistics) { + _query_statistics->set_max_peak_memory_bytes(peak_consumption()); + _query_statistics->set_current_used_memory_bytes(consumption()); } - return st; + return rt; } - Status check_limit(int64_t bytes = 0); - bool is_overcommit_tracker() const { return type() == Type::QUERY || type() == Type::LOAD; } - - bool is_query_cancelled() { return _is_query_cancelled; } + void set_consumption(int64_t bytes) { _mem_counter.set(bytes); } - void set_is_query_cancelled(bool is_cancelled) { _is_query_cancelled.store(is_cancelled); } - -public: // Transfer 'bytes' of consumption from this tracker to 'dst'. void transfer_to(int64_t size, MemTrackerLimiter* dst) { if (label() == dst->label()) { @@ -139,21 +177,50 @@ class MemTrackerLimiter final : public MemTracker { dst->cache_consume(size); } + // If need to consume the tracker frequently, use it + void cache_consume(int64_t bytes); + + /* + * Part 4, Reserved memory tracking method + */ + + int64_t reserved_consumption() const { return _reserved_counter.current_value(); } + int64_t reserved_peak_consumption() const { return _reserved_counter.peak_value(); } + + bool try_reserve(int64_t bytes) { + bool rt = try_consume(bytes); + if (rt) { + _reserved_counter.add(bytes); + } + return rt; + } + + void release_reserved(int64_t bytes) { + _reserved_counter.sub(bytes); + DCHECK(reserved_consumption() >= 0); + } + + Snapshot make_reserved_trackers_snapshot() const; + static void make_all_reserved_trackers_snapshots(std::vector* snapshots); + + /* + * Part 4, Memory snapshot and log method + */ + static void refresh_global_counter(); static void clean_tracker_limiter_group(); - Snapshot make_snapshot() const override; + Snapshot make_snapshot() const; // Returns a list of all the valid tracker snapshots. 
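In the reserved-memory API above, try_reserve bumps the separate reserved counter only after try_consume has succeeded, so reservations always stay a subset of tracked consumption, and release_reserved undoes just the reservation bookkeeping. A simplified single-tracker sketch of that double-counter idea, with plain atomics standing in for MemCounter:

#include <atomic>
#include <cstdint>

class ReservingCounter {
public:
    explicit ReservingCounter(int64_t limit) : _limit(limit) {}

    // Try to account `bytes`; fail without side effects if the limit would be exceeded.
    bool try_consume(int64_t bytes) {
        int64_t cur = _consumption.load();
        while (cur + bytes <= _limit) {
            if (_consumption.compare_exchange_weak(cur, cur + bytes)) {
                return true;
            }
        }
        return false;
    }

    void release(int64_t bytes) { _consumption.fetch_sub(bytes); }

    // A reservation is consumption that has been set aside but not yet used.
    bool try_reserve(int64_t bytes) {
        if (!try_consume(bytes)) {
            return false;
        }
        _reserved.fetch_add(bytes);
        return true;
    }

    // Undo only the reservation bookkeeping; the caller decides whether the underlying
    // consumption is released or converted into real usage.
    void release_reserved(int64_t bytes) { _reserved.fetch_sub(bytes); }

    int64_t consumption() const { return _consumption.load(); }
    int64_t reserved_consumption() const { return _reserved.load(); }

private:
    int64_t _limit;
    std::atomic<int64_t> _consumption{0};
    std::atomic<int64_t> _reserved{0};
};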
- static void make_process_snapshots(std::vector* snapshots); - static void make_type_snapshots(std::vector* snapshots, Type type); - static void make_all_trackers_snapshots(std::vector* snapshots); - static void make_all_memory_state_snapshots(std::vector* snapshots); - static void make_top_consumption_snapshots(std::vector* snapshots, - int top_num); - - static std::string log_usage(MemTracker::Snapshot snapshot); + static void make_process_snapshots(std::vector* snapshots); + static void make_type_snapshots(std::vector* snapshots, Type type); + static void make_all_trackers_snapshots(std::vector* snapshots); + static void make_all_memory_state_snapshots(std::vector* snapshots); + static void make_top_consumption_snapshots(std::vector* snapshots, int top_num); + + static std::string log_usage(Snapshot snapshot); std::string log_usage() const { return log_usage(make_snapshot()); } - static std::string type_log_usage(MemTracker::Snapshot snapshot); + static std::string type_log_usage(Snapshot snapshot); static std::string type_detail_usage(const std::string& msg, Type type); void print_log_usage(const std::string& msg); void enable_print_log_usage() { _enable_print_log_usage = true; } @@ -161,6 +228,12 @@ class MemTrackerLimiter final : public MemTracker { static void enable_print_log_process_usage() { _enable_print_log_process_usage = true; } static std::string log_process_usage_str(); static void print_log_process_usage(); + // Log the memory usage when memory limit is exceeded. + std::string tracker_limit_exceeded_str(); + + /* + * Part 5, Memory GC method + */ // Start canceling from the query with the largest memory usage until the memory of min_free_mem size is freed. // cancel_reason recorded when gc is triggered, for log printing. @@ -191,6 +264,53 @@ class MemTrackerLimiter final : public MemTracker { return free_top_overcommit_query(min_free_mem, cancel_reason, profile, Type::LOAD); } + /* + * Part 6, Memory debug method + */ + + void add_address_sanitizers(void* buf, size_t size); + void remove_address_sanitizers(void* buf, size_t size); + bool is_group_commit_load {false}; + +private: + /* + * Part 7, Private method + */ + + static std::string type_string(Type type) { + switch (type) { + case Type::GLOBAL: + return "global"; + case Type::QUERY: + return "query"; + case Type::LOAD: + return "load"; + case Type::COMPACTION: + return "compaction"; + case Type::SCHEMA_CHANGE: + return "schema_change"; + case Type::OTHER: + return "other"; + default: + LOG(FATAL) << "not match type of mem tracker limiter :" << static_cast(type); + } + LOG(FATAL) << "__builtin_unreachable"; + __builtin_unreachable(); + } + + static std::string gc_type_string(GCType type) { + switch (type) { + case GCType::PROCESS: + return "process"; + case GCType::WORK_LOAD_GROUP: + return "work load group"; + default: + LOG(FATAL) << "not match gc type:" << static_cast(type); + } + LOG(FATAL) << "__builtin_unreachable"; + __builtin_unreachable(); + } + // only for Type::QUERY or Type::LOAD. static TUniqueId label_to_queryid(const std::string& label) { if (label.find("#Id=") == std::string::npos) { @@ -202,40 +322,23 @@ class MemTrackerLimiter final : public MemTracker { return querytid; } - // Log the memory usage when memory limit is exceeded. 
- std::string tracker_limit_exceeded_str(); - -#ifndef NDEBUG - void add_address_sanitizers(void* buf, size_t size); - void remove_address_sanitizers(void* buf, size_t size); - std::string print_address_sanitizers(); - bool is_group_commit_load {false}; -#endif - - std::string debug_string() override { - std::stringstream msg; - msg << "limit: " << _limit << "; " - << "consumption: " << _consumption->current_value() << "; " - << "label: " << _label << "; " - << "type: " << type_string(_type) << "; "; - return msg.str(); - } - - // Iterator into mem_tracker_limiter_pool for this object. Stored to have O(1) remove. - std::list>::iterator wg_tracker_limiter_group_it; - -private: - friend class ThreadMemTrackerMgr; - - // If need to consume the tracker frequently, use it - void cache_consume(int64_t bytes); - // When the accumulated untracked memory value exceeds the upper limit, // the current value is returned and set to 0. // Thread safety. int64_t add_untracked_mem(int64_t bytes); -private: + /* + * Part 8, Property definition + */ + + Type _type; + + // label used in the make snapshot, not guaranteed unique. + std::string _label; + + MemCounter _mem_counter; + MemCounter _reserved_counter; + // Limit on memory consumption, in bytes. int64_t _limit; @@ -253,16 +356,18 @@ class MemTrackerLimiter final : public MemTracker { bool _enable_print_log_usage = false; static std::atomic _enable_print_log_process_usage; -#ifndef NDEBUG + std::shared_ptr _query_statistics = nullptr; + struct AddressSanitizer { size_t size; std::string stack_trace; }; + std::string print_address_sanitizers(); + bool open_memory_tracker_inaccurate_detect(); std::mutex _address_sanitizers_mtx; std::unordered_map _address_sanitizers; std::vector _error_address_sanitizers; -#endif }; inline int64_t MemTrackerLimiter::add_untracked_mem(int64_t bytes) { @@ -274,7 +379,9 @@ inline int64_t MemTrackerLimiter::add_untracked_mem(int64_t bytes) { } inline void MemTrackerLimiter::cache_consume(int64_t bytes) { - if (bytes == 0) return; + if (bytes == 0) { + return; + } int64_t consume_bytes = add_untracked_mem(bytes); consume(consume_bytes); } @@ -283,9 +390,10 @@ inline Status MemTrackerLimiter::check_limit(int64_t bytes) { if (bytes <= 0 || (is_overcommit_tracker() && config::enable_query_memory_overcommit)) { return Status::OK(); } - if (_limit > 0 && _consumption->current_value() + bytes > _limit) { - return Status::MemoryLimitExceeded(fmt::format( - "failed alloc size {}, {}", print_bytes(bytes), tracker_limit_exceeded_str())); + if (_limit > 0 && consumption() + bytes > _limit) { + return Status::MemoryLimitExceeded(fmt::format("failed alloc size {}, {}", + MemCounter::print_bytes(bytes), + tracker_limit_exceeded_str())); } return Status::OK(); } diff --git a/be/src/runtime/memory/thread_mem_tracker_mgr.cpp b/be/src/runtime/memory/thread_mem_tracker_mgr.cpp index 33dd0d41822ae1..d036564528534c 100644 --- a/be/src/runtime/memory/thread_mem_tracker_mgr.cpp +++ b/be/src/runtime/memory/thread_mem_tracker_mgr.cpp @@ -46,7 +46,7 @@ void ThreadMemTrackerMgr::attach_limiter_tracker( DCHECK(mem_tracker); CHECK(init()); flush_untracked_mem(); - _reserved_mem_stack.push_back(_reserved_mem); + _last_attach_snapshots_stack.push_back({_reserved_mem, _consumer_tracker_stack}); if (_reserved_mem != 0) { // _untracked_mem temporary store bytes that not synchronized to process reserved memory, // but bytes have been subtracted from thread _reserved_mem. 
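attach_limiter_tracker saves the thread's pending reservation, and with this change also its consumer tracker stack, before switching trackers; detach_limiter_tracker restores them in LIFO order. A stripped-down sketch of that save/restore discipline, with simplified types in place of the real tracker pointers:

#include <cstdint>
#include <string>
#include <vector>

struct ThreadState {
    int64_t reserved_mem = 0;
    std::vector<std::string> consumer_stack;  // stand-in for consumer tracker pointers

    struct Saved {
        int64_t reserved_mem;
        std::vector<std::string> consumer_stack;
    };
    std::vector<Saved> saved_stack;

    // On attach: remember the current state, then start the new scope from a clean slate.
    void attach() {
        saved_stack.push_back({reserved_mem, consumer_stack});
        reserved_mem = 0;
        consumer_stack.clear();
    }

    // On detach: restore whatever the enclosing scope had, in LIFO order.
    void detach() {
        reserved_mem = saved_stack.back().reserved_mem;
        consumer_stack = saved_stack.back().consumer_stack;
        saved_stack.pop_back();
    }
};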
@@ -54,6 +54,7 @@ void ThreadMemTrackerMgr::attach_limiter_tracker( _reserved_mem = 0; _untracked_mem = 0; } + _consumer_tracker_stack.clear(); _limiter_tracker = mem_tracker; } @@ -62,9 +63,10 @@ void ThreadMemTrackerMgr::detach_limiter_tracker( CHECK(init()); flush_untracked_mem(); release_reserved(); - DCHECK(!_reserved_mem_stack.empty()); - _reserved_mem = _reserved_mem_stack.back(); - _reserved_mem_stack.pop_back(); + DCHECK(!_last_attach_snapshots_stack.empty()); + _reserved_mem = _last_attach_snapshots_stack.back().reserved_mem; + _consumer_tracker_stack = _last_attach_snapshots_stack.back().consumer_tracker_stack; + _last_attach_snapshots_stack.pop_back(); _limiter_tracker = old_mem_tracker; } diff --git a/be/src/runtime/memory/thread_mem_tracker_mgr.h b/be/src/runtime/memory/thread_mem_tracker_mgr.h index bb0091f2e6d6fb..fd14750d8b8ebc 100644 --- a/be/src/runtime/memory/thread_mem_tracker_mgr.h +++ b/be/src/runtime/memory/thread_mem_tracker_mgr.h @@ -106,7 +106,7 @@ class ThreadMemTrackerMgr { std::string print_debug_string() { fmt::memory_buffer consumer_tracker_buf; for (const auto& v : _consumer_tracker_stack) { - fmt::format_to(consumer_tracker_buf, "{}, ", MemTracker::log_usage(v->make_snapshot())); + fmt::format_to(consumer_tracker_buf, "{}, ", v->log_usage()); } return fmt::format( "ThreadMemTrackerMgr debug, _untracked_mem:{}, " @@ -119,6 +119,11 @@ class ThreadMemTrackerMgr { int64_t reserved_mem() const { return _reserved_mem; } private: + struct LastAttachSnapshot { + int64_t reserved_mem = 0; + std::vector consumer_tracker_stack; + }; + // is false: ExecEnv::ready() = false when thread local is initialized bool _init = false; // Cache untracked mem. @@ -126,9 +131,10 @@ class ThreadMemTrackerMgr { int64_t _old_untracked_mem = 0; int64_t _reserved_mem = 0; + // SCOPED_ATTACH_TASK cannot be nested, but SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER can continue to be used, // so `attach_limiter_tracker` may be nested. - std::vector _reserved_mem_stack; + std::vector _last_attach_snapshots_stack; std::string _failed_consume_msg = std::string(); // If true, the Allocator will wait for the GC to free memory if it finds that the memory exceed limit. @@ -194,6 +200,7 @@ inline void ThreadMemTrackerMgr::consume(int64_t size, int skip_large_memory_che // if _untracked_mem less than -SYNC_PROC_RESERVED_INTERVAL_BYTES, increase process reserved memory. if (std::abs(_untracked_mem) >= SYNC_PROC_RESERVED_INTERVAL_BYTES) { doris::GlobalMemoryArbitrator::release_process_reserved_memory(_untracked_mem); + _limiter_tracker->release_reserved(_untracked_mem); _untracked_mem = 0; } return; @@ -205,6 +212,7 @@ inline void ThreadMemTrackerMgr::consume(int64_t size, int skip_large_memory_che size -= _reserved_mem; doris::GlobalMemoryArbitrator::release_process_reserved_memory(_reserved_mem + _untracked_mem); + _limiter_tracker->release_reserved(_reserved_mem + _untracked_mem); _reserved_mem = 0; _untracked_mem = 0; } @@ -277,7 +285,7 @@ inline doris::Status ThreadMemTrackerMgr::try_reserve(int64_t size) { // if _reserved_mem not equal to 0, repeat reserve, // _untracked_mem store bytes that not synchronized to process reserved memory. 
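The thread-local _untracked_mem batches small deltas and only syncs them to the shared process counters once the accumulated amount crosses SYNC_PROC_RESERVED_INTERVAL_BYTES, keeping contention on the shared atomics low. A minimal sketch of that batching idea; the interval constant and the global counter below are illustrative stand-ins, not the real arbitrator:

#include <atomic>
#include <cstdint>
#include <cstdlib>

// Shared counter that every thread ultimately reports into.
std::atomic<int64_t> g_process_mem{0};

// Illustrative sync interval: small deltas stay thread-local until this is crossed.
constexpr int64_t kSyncIntervalBytes = 1024 * 1024;

thread_local int64_t t_untracked_mem = 0;

void consume(int64_t bytes) {
    t_untracked_mem += bytes;
    if (std::abs(t_untracked_mem) >= kSyncIntervalBytes) {
        g_process_mem.fetch_add(t_untracked_mem);  // one shared update per interval
        t_untracked_mem = 0;
    }
}

// Must be called before the thread's view needs to be globally visible
// (e.g. when switching trackers); otherwise up to kSyncIntervalBytes stays local.
void flush_untracked_mem() {
    if (t_untracked_mem != 0) {
        g_process_mem.fetch_add(t_untracked_mem);
        t_untracked_mem = 0;
    }
}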
flush_untracked_mem(); - if (!_limiter_tracker->try_consume(size)) { + if (!_limiter_tracker->try_reserve(size)) { auto err_msg = fmt::format( "reserve memory failed, size: {}, because memory tracker consumption: {}, limit: " "{}", @@ -289,14 +297,16 @@ inline doris::Status ThreadMemTrackerMgr::try_reserve(int64_t size) { if (!wg_ptr->add_wg_refresh_interval_memory_growth(size)) { auto err_msg = fmt::format("reserve memory failed, size: {}, because {}", size, wg_ptr->memory_debug_string()); - _limiter_tracker->release(size); // rollback + _limiter_tracker->release(size); // rollback + _limiter_tracker->release_reserved(size); // rollback return doris::Status::MemoryLimitExceeded(err_msg); } } if (!doris::GlobalMemoryArbitrator::try_reserve_process_memory(size)) { auto err_msg = fmt::format("reserve memory failed, size: {}, because {}", size, GlobalMemoryArbitrator::process_mem_log_str()); - _limiter_tracker->release(size); // rollback + _limiter_tracker->release(size); // rollback + _limiter_tracker->release_reserved(size); // rollback if (wg_ptr) { wg_ptr->sub_wg_refresh_interval_memory_growth(size); // rollback } @@ -310,6 +320,7 @@ inline void ThreadMemTrackerMgr::release_reserved() { if (_reserved_mem != 0) { doris::GlobalMemoryArbitrator::release_process_reserved_memory(_reserved_mem + _untracked_mem); + _limiter_tracker->release_reserved(_reserved_mem + _untracked_mem); _limiter_tracker->release(_reserved_mem); auto wg_ptr = _wg_wptr.lock(); if (wg_ptr) { diff --git a/be/src/runtime/query_context.cpp b/be/src/runtime/query_context.cpp index b9430d3899b8d3..055a78471e3d41 100644 --- a/be/src/runtime/query_context.cpp +++ b/be/src/runtime/query_context.cpp @@ -151,9 +151,9 @@ QueryContext::~QueryContext() { mem_tracker_msg = fmt::format( ", deregister query/load memory tracker, queryId={}, Limit={}, CurrUsed={}, " "PeakUsed={}", - print_id(_query_id), MemTracker::print_bytes(query_mem_tracker->limit()), - MemTracker::print_bytes(query_mem_tracker->consumption()), - MemTracker::print_bytes(query_mem_tracker->peak_consumption())); + print_id(_query_id), MemCounter::print_bytes(query_mem_tracker->limit()), + MemCounter::print_bytes(query_mem_tracker->consumption()), + MemCounter::print_bytes(query_mem_tracker->peak_consumption())); } uint64_t group_id = 0; if (_workload_group) { @@ -368,9 +368,6 @@ void QueryContext::add_fragment_profile( void QueryContext::_report_query_profile() { std::lock_guard lg(_profile_mutex); - LOG_INFO( - "Pipeline x query context, register query profile, query {}, fragment profile count {}", - print_id(_query_id), _profile_map.size()); for (auto& [fragment_id, fragment_profile] : _profile_map) { std::shared_ptr load_channel_profile = nullptr; diff --git a/be/src/runtime/query_context.h b/be/src/runtime/query_context.h index 7a6d6d3c53d49e..d1d78573923a7e 100644 --- a/be/src/runtime/query_context.h +++ b/be/src/runtime/query_context.h @@ -50,8 +50,6 @@ class PipelineFragmentContext; struct ReportStatusRequest { const Status status; std::vector runtime_states; - RuntimeProfile* profile = nullptr; - RuntimeProfile* load_channel_profile = nullptr; bool done; TNetworkAddress coord_addr; TUniqueId query_id; diff --git a/be/src/runtime/routine_load/routine_load_task_executor.cpp b/be/src/runtime/routine_load/routine_load_task_executor.cpp index b63495df837d1a..2c69b8a58704bf 100644 --- a/be/src/runtime/routine_load/routine_load_task_executor.cpp +++ b/be/src/runtime/routine_load/routine_load_task_executor.cpp @@ -315,7 +315,7 @@ Status 
RoutineLoadTaskExecutor::submit_task(const TRoutineLoadTask& task) { bool RoutineLoadTaskExecutor::_reach_memory_limit() { bool is_exceed_soft_mem_limit = GlobalMemoryArbitrator::is_exceed_soft_mem_limit(); auto current_load_mem_value = - MemTrackerLimiter::TypeMemSum[MemTrackerLimiter::Type::LOAD]->current_value(); + MemTrackerLimiter::TypeMemSum[MemTrackerLimiter::Type::LOAD].current_value(); if (is_exceed_soft_mem_limit || current_load_mem_value > _load_mem_limit) { LOG(INFO) << "is_exceed_soft_mem_limit: " << is_exceed_soft_mem_limit << " current_load_mem_value: " << current_load_mem_value diff --git a/be/src/runtime/runtime_filter_mgr.cpp b/be/src/runtime/runtime_filter_mgr.cpp index d2b55d86bc6bd4..01fcf851321fc1 100644 --- a/be/src/runtime/runtime_filter_mgr.cpp +++ b/be/src/runtime/runtime_filter_mgr.cpp @@ -48,8 +48,7 @@ RuntimeFilterMgr::RuntimeFilterMgr(const UniqueId& query_id, RuntimeFilterParams _state = state; _state->runtime_filter_mgr = this; _query_mem_tracker = query_mem_tracker; - _tracker = std::make_unique("RuntimeFilterMgr(experimental)", - _query_mem_tracker.get()); + _tracker = std::make_unique("RuntimeFilterMgr(experimental)"); } RuntimeFilterMgr::~RuntimeFilterMgr() { @@ -264,8 +263,7 @@ Status RuntimeFilterMergeControllerEntity::init(UniqueId query_id, const TRuntimeFilterParams& runtime_filter_params, const TQueryOptions& query_options) { _query_id = query_id; - _mem_tracker = std::make_shared("RuntimeFilterMergeControllerEntity(experimental)", - ExecEnv::GetInstance()->details_mem_tracker_set()); + _mem_tracker = std::make_shared("RuntimeFilterMergeControllerEntity(experimental)"); SCOPED_CONSUME_MEM_TRACKER(_mem_tracker.get()); if (runtime_filter_params.__isset.rid_to_runtime_filter) { for (const auto& filterid_to_desc : runtime_filter_params.rid_to_runtime_filter) { diff --git a/be/src/runtime/runtime_query_statistics_mgr.cpp b/be/src/runtime/runtime_query_statistics_mgr.cpp index 77fd80cd528998..75dd4ed0321c44 100644 --- a/be/src/runtime/runtime_query_statistics_mgr.cpp +++ b/be/src/runtime/runtime_query_statistics_mgr.cpp @@ -117,7 +117,7 @@ TReportExecStatusParams RuntimeQueryStatisticsMgr::create_report_exec_status_par int32_t fragment_id = entry.first; const std::vector>& fragment_profile = entry.second; std::vector detailed_params; - + bool is_first = true; for (auto pipeline_profile : fragment_profile) { if (pipeline_profile == nullptr) { auto msg = fmt::format("Register fragment profile {} {} failed, profile is null", @@ -129,6 +129,9 @@ TReportExecStatusParams RuntimeQueryStatisticsMgr::create_report_exec_status_par TDetailedReportParams tmp; THRIFT_MOVE_VALUES(tmp, profile, *pipeline_profile); + // First profile is fragment level + tmp.__set_is_fragment_level(is_first); + is_first = false; // tmp.fragment_instance_id is not needed for pipeline x detailed_params.push_back(std::move(tmp)); } diff --git a/be/src/runtime/runtime_state.h b/be/src/runtime/runtime_state.h index f43d0a163dfdaa..90cf1bc34bdeca 100644 --- a/be/src/runtime/runtime_state.h +++ b/be/src/runtime/runtime_state.h @@ -490,6 +490,18 @@ class RuntimeState { : 0; } + int partition_topn_max_partitions() const { + return _query_options.__isset.partition_topn_max_partitions + ? _query_options.partition_topn_max_partitions + : 1024; + } + + int partition_topn_per_partition_rows() const { + return _query_options.__isset.partition_topn_pre_partition_rows + ? 
_query_options.partition_topn_pre_partition_rows + : 1000; + } + int64_t parallel_scan_min_rows_per_scanner() const { return _query_options.__isset.parallel_scan_min_rows_per_scanner ? _query_options.parallel_scan_min_rows_per_scanner diff --git a/be/src/runtime/stream_load/stream_load_context.cpp b/be/src/runtime/stream_load/stream_load_context.cpp index cec015fe92cc7b..dbabf7fa421a09 100644 --- a/be/src/runtime/stream_load/stream_load_context.cpp +++ b/be/src/runtime/stream_load/stream_load_context.cpp @@ -107,6 +107,8 @@ std::string StreamLoadContext::to_json() const { writer.Int64(read_data_cost_nanos / 1000000); writer.Key("WriteDataTimeMs"); writer.Int(write_data_cost_nanos / 1000000); + writer.Key("ReceiveDataTimeMs"); + writer.Int((receive_and_read_data_cost_nanos - read_data_cost_nanos) / 1000000); if (!group_commit) { writer.Key("CommitAndPublishTimeMs"); writer.Int64(commit_and_publish_txn_cost_nanos / 1000000); diff --git a/be/src/runtime/tablets_channel.cpp b/be/src/runtime/tablets_channel.cpp index 329366766f86af..4d458cd440fda2 100644 --- a/be/src/runtime/tablets_channel.cpp +++ b/be/src/runtime/tablets_channel.cpp @@ -446,7 +446,7 @@ void BaseTabletsChannel::refresh_profile() { { std::lock_guard l(_tablet_writers_lock); for (auto&& [tablet_id, writer] : _tablet_writers) { - int64_t write_mem = writer->mem_consumption(MemType::WRITE); + int64_t write_mem = writer->mem_consumption(MemType::WRITE_FINISHED); write_mem_usage += write_mem; int64_t flush_mem = writer->mem_consumption(MemType::FLUSH); flush_mem_usage += flush_mem; diff --git a/be/src/runtime/thread_context.h b/be/src/runtime/thread_context.h index ea842c12028665..19ebffa935494a 100644 --- a/be/src/runtime/thread_context.h +++ b/be/src/runtime/thread_context.h @@ -402,6 +402,10 @@ class QueryThreadContext { #endif } + std::shared_ptr get_memory_tracker() { return query_mem_tracker; } + + WorkloadGroupPtr get_workload_group_ptr() { return wg_wptr.lock(); } + TUniqueId query_id; std::shared_ptr query_mem_tracker; std::weak_ptr wg_wptr; diff --git a/be/src/runtime/workload_group/workload_group.cpp b/be/src/runtime/workload_group/workload_group.cpp index 85f79536b74ec3..6f3b51f09fd1f2 100644 --- a/be/src/runtime/workload_group/workload_group.cpp +++ b/be/src/runtime/workload_group/workload_group.cpp @@ -210,21 +210,21 @@ int64_t WorkloadGroup::gc_memory(int64_t need_free_mem, RuntimeProfile* profile, cancel_str = fmt::format( "MinorGC kill overcommit query, wg id:{}, name:{}, used:{}, limit:{}, " "backend:{}.", - _id, _name, MemTracker::print_bytes(used_memory), - MemTracker::print_bytes(_memory_limit), BackendOptions::get_localhost()); + _id, _name, MemCounter::print_bytes(used_memory), + MemCounter::print_bytes(_memory_limit), BackendOptions::get_localhost()); } else { if (_enable_memory_overcommit) { cancel_str = fmt::format( "FullGC release wg overcommit mem, wg id:{}, name:{}, " "used:{},limit:{},backend:{}.", - _id, _name, MemTracker::print_bytes(used_memory), - MemTracker::print_bytes(_memory_limit), BackendOptions::get_localhost()); + _id, _name, MemCounter::print_bytes(used_memory), + MemCounter::print_bytes(_memory_limit), BackendOptions::get_localhost()); } else { cancel_str = fmt::format( "GC wg for hard limit, wg id:{}, name:{}, used:{}, limit:{}, " "backend:{}.", - _id, _name, MemTracker::print_bytes(used_memory), - MemTracker::print_bytes(_memory_limit), BackendOptions::get_localhost()); + _id, _name, MemCounter::print_bytes(used_memory), + MemCounter::print_bytes(_memory_limit), 
BackendOptions::get_localhost()); } } auto cancel_top_overcommit_str = [cancel_str](int64_t mem_consumption, @@ -232,14 +232,14 @@ int64_t WorkloadGroup::gc_memory(int64_t need_free_mem, RuntimeProfile* profile, return fmt::format( "{} cancel top memory overcommit tracker <{}> consumption {}. details:{}, Execute " "again after enough memory, details see be.INFO.", - cancel_str, label, MemTracker::print_bytes(mem_consumption), + cancel_str, label, MemCounter::print_bytes(mem_consumption), GlobalMemoryArbitrator::process_limit_exceeded_errmsg_str()); }; auto cancel_top_usage_str = [cancel_str](int64_t mem_consumption, const std::string& label) { return fmt::format( "{} cancel top memory used tracker <{}> consumption {}. details:{}, Execute again " "after enough memory, details see be.INFO.", - cancel_str, label, MemTracker::print_bytes(mem_consumption), + cancel_str, label, MemCounter::print_bytes(mem_consumption), GlobalMemoryArbitrator::process_soft_limit_exceeded_errmsg_str()); }; diff --git a/be/src/runtime/workload_group/workload_group_manager.cpp b/be/src/runtime/workload_group/workload_group_manager.cpp index 32470fed5ab929..65a8e3685c80ed 100644 --- a/be/src/runtime/workload_group/workload_group_manager.cpp +++ b/be/src/runtime/workload_group/workload_group_manager.cpp @@ -232,9 +232,9 @@ void WorkloadGroupMgr::refresh_wg_weighted_memory_limit() { // check whether queries need to revoke memory for task group for (const auto& query_mem_tracker : wgs_mem_info[wg.first].tracker_snapshots) { debug_msg += fmt::format( - "\n MemTracker Label={}, Parent Label={}, Used={}, SpillThreshold={}, " + "\n MemTracker Label={}, Used={}, SpillThreshold={}, " "Peak={}", - query_mem_tracker->label(), query_mem_tracker->parent_label(), + query_mem_tracker->label(), PrettyPrinter::print(query_mem_tracker->consumption(), TUnit::BYTES), PrettyPrinter::print(query_spill_threshold, TUnit::BYTES), PrettyPrinter::print(query_mem_tracker->peak_consumption(), TUnit::BYTES)); diff --git a/be/src/service/brpc_service.cpp b/be/src/service/brpc_service.cpp index 4b5587741e7253..8cf0e330aa9c8c 100644 --- a/be/src/service/brpc_service.cpp +++ b/be/src/service/brpc_service.cpp @@ -83,6 +83,8 @@ Status BRpcService::start(int port, int num_threads) { sslOptions->default_cert.private_key = config::ssl_private_key_path; } + options.has_builtin_services = config::enable_brpc_builtin_services; + butil::EndPoint point; if (butil::str2endpoint(BackendOptions::get_service_bind_address(), port, &point) < 0) { return Status::InternalError("convert address failed, host={}, port={}", "[::0]", port); diff --git a/be/src/service/http_service.cpp b/be/src/service/http_service.cpp index f2c325bebc7806..9330867ded65a1 100644 --- a/be/src/service/http_service.cpp +++ b/be/src/service/http_service.cpp @@ -25,6 +25,7 @@ #include #include "cloud/cloud_compaction_action.h" +#include "cloud/cloud_delete_bitmap_action.h" #include "cloud/config.h" #include "cloud/injection_point_action.h" #include "common/config.h" @@ -47,6 +48,7 @@ #include "http/action/health_action.h" #include "http/action/http_stream.h" #include "http/action/jeprofile_actions.h" +#include "http/action/load_channel_action.h" #include "http/action/load_stream_action.h" #include "http/action/meta_action.h" #include "http/action/metrics_action.h" @@ -188,6 +190,10 @@ Status HttpService::start() { LoadStreamAction* load_stream_action = _pool.add(new LoadStreamAction(_env)); _ev_http_server->register_handler(HttpMethod::GET, "/api/load_streams", load_stream_action); + // Register 
BE LoadChannel action + LoadChannelAction* load_channel_action = _pool.add(new LoadChannelAction(_env)); + _ev_http_server->register_handler(HttpMethod::GET, "/api/load_channels", load_channel_action); + // Register Tablets Info action TabletsInfoAction* tablets_info_action = _pool.add(new TabletsInfoAction(_env, TPrivilegeHier::GLOBAL, TPrivilegeType::ADMIN)); @@ -406,6 +412,11 @@ void HttpService::register_cloud_handler(CloudStorageEngine& engine) { TPrivilegeHier::GLOBAL, TPrivilegeType::ADMIN)); _ev_http_server->register_handler(HttpMethod::GET, "/api/compaction/run_status", run_status_compaction_action); + CloudDeleteBitmapAction* count_delete_bitmap_action = + _pool.add(new CloudDeleteBitmapAction(DeleteBitmapActionType::COUNT_INFO, _env, engine, + TPrivilegeHier::GLOBAL, TPrivilegeType::ADMIN)); + _ev_http_server->register_handler(HttpMethod::GET, "/api/delete_bitmap/count", + count_delete_bitmap_action); #ifdef ENABLE_INJECTION_POINT InjectionPointAction* injection_point_action = _pool.add(new InjectionPointAction); _ev_http_server->register_handler(HttpMethod::GET, "/api/injection_point/{op}", diff --git a/be/src/service/point_query_executor.cpp b/be/src/service/point_query_executor.cpp index 0a27c415a48c0a..9719a672b8dff4 100644 --- a/be/src/service/point_query_executor.cpp +++ b/be/src/service/point_query_executor.cpp @@ -191,9 +191,9 @@ LookupConnectionCache* LookupConnectionCache::create_global_instance(size_t capa } RowCache::RowCache(int64_t capacity, int num_shards) - : LRUCachePolicyTrackingManual( - CachePolicy::CacheType::POINT_QUERY_ROW_CACHE, capacity, LRUCacheType::SIZE, - config::point_query_row_cache_stale_sweep_time_sec, num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::POINT_QUERY_ROW_CACHE, capacity, + LRUCacheType::SIZE, config::point_query_row_cache_stale_sweep_time_sec, + num_shards) {} // Create global instance of this class RowCache* RowCache::create_global_cache(int64_t capacity, uint32_t num_shards) { @@ -223,8 +223,8 @@ void RowCache::insert(const RowCacheKey& key, const Slice& value) { auto* row_cache_value = new RowCacheValue; row_cache_value->cache_value = cache_value; const std::string& encoded_key = key.encode(); - auto* handle = LRUCachePolicyTrackingManual::insert(encoded_key, row_cache_value, value.size, - value.size, CachePriority::NORMAL); + auto* handle = LRUCachePolicy::insert(encoded_key, row_cache_value, value.size, value.size, + CachePriority::NORMAL); // handle will released auto tmp = CacheHandle {this, handle}; } diff --git a/be/src/service/point_query_executor.h b/be/src/service/point_query_executor.h index 6c6fb28f95a378..b22dc5bfd1d73f 100644 --- a/be/src/service/point_query_executor.h +++ b/be/src/service/point_query_executor.h @@ -126,9 +126,9 @@ class Reusable { }; // RowCache is a LRU cache for row store -class RowCache : public LRUCachePolicyTrackingManual { +class RowCache : public LRUCachePolicy { public: - using LRUCachePolicyTrackingManual::insert; + using LRUCachePolicy::insert; // The cache key for row lru cache struct RowCacheKey { @@ -220,7 +220,7 @@ class RowCache : public LRUCachePolicyTrackingManual { // A cache used for prepare stmt. 
// One connection per stmt perf uuid -class LookupConnectionCache : public LRUCachePolicyTrackingManual { +class LookupConnectionCache : public LRUCachePolicy { public: static LookupConnectionCache* instance() { return ExecEnv::GetInstance()->get_lookup_connection_cache(); @@ -231,9 +231,9 @@ class LookupConnectionCache : public LRUCachePolicyTrackingManual { private: friend class PointQueryExecutor; LookupConnectionCache(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::LOOKUP_CONNECTION_CACHE, - capacity, LRUCacheType::NUMBER, - config::tablet_lookup_cache_stale_sweep_time_sec) {} + : LRUCachePolicy(CachePolicy::CacheType::LOOKUP_CONNECTION_CACHE, capacity, + LRUCacheType::NUMBER, + config::tablet_lookup_cache_stale_sweep_time_sec) {} static std::string encode_key(__int128_t cache_id) { fmt::memory_buffer buffer; diff --git a/be/src/util/algorithm_util.h b/be/src/util/algorithm_util.h new file mode 100644 index 00000000000000..acddd3be3a3642 --- /dev/null +++ b/be/src/util/algorithm_util.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "common/status.h" +namespace doris { +class AlgoUtil { +public: + // Descend the value step by step instead of as a linear continuous function. + // If the result changed continuously, the value would change very quickly, would cost + // a lot of CPU, and the cache would not be stable and would hold some locks. + // It is better to use a step num of 10; do not use 3, because the divided value is not stable. + // For example, if step num is 3, the results will be 0.33333... and 0.66666..., and such double + // values are not stable. + static double descent_by_step(int step_num, int64_t low_bound, int64_t high_bound, + int64_t current) { + if (current <= low_bound) { + return 1; + } + if (current >= high_bound) { + return 0; + } + if (high_bound <= low_bound) { + // Invalid + return 0; + } + // Use floor value, so that the step size is a little smaller than the actual value. + // And then the used step will be a little larger than the actual value. + int64_t step_size = (int64_t)std::floor((high_bound - low_bound) / (step_num * 1.0)); + int64_t used_step = (int64_t)std::ceil((current - low_bound) / (step_size * 1.0)); + // Then the left step is smaller than the actual value. + // This elimination algorithm will evict a bit more cache than actually needed.
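// Worked example (illustrative values): descent_by_step(10, 0, 100, 35) gives
// step_size = floor(100 / 10.0) = 10, used_step = ceil(35 / 10.0) = 4, left_step = 6,
// and the result is 6 / 10.0 = 0.6; in this example each further step_size of `current`
// lowers the result by 0.1.
// Note the arithmetic assumes high_bound - low_bound >= step_num, otherwise step_size
// floors to 0 and the ceil division above would divide by zero.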
+ int64_t left_step = step_num - used_step; + return left_step / (step_num * 1.0); + } +}; +} // namespace doris \ No newline at end of file diff --git a/be/src/util/doris_metrics.cpp b/be/src/util/doris_metrics.cpp index bccada05bf1920..4ec0b1370e6d3b 100644 --- a/be/src/util/doris_metrics.cpp +++ b/be/src/util/doris_metrics.cpp @@ -332,7 +332,7 @@ void DorisMetrics::_update_process_thread_num() { std::filesystem::directory_iterator dict_iter("/proc/self/task/", ec); if (ec) { LOG(WARNING) << "failed to count thread num: " << ec.message(); - process_fd_num_used->set_value(0); + process_thread_num->set_value(0); return; } int64_t count = diff --git a/be/src/util/jsonb_document.h b/be/src/util/jsonb_document.h index 8a95ccef8d9a40..2a9cf8a8191caa 100644 --- a/be/src/util/jsonb_document.h +++ b/be/src/util/jsonb_document.h @@ -345,6 +345,22 @@ struct leg_info { ///type: 0 is member 1 is array unsigned int type; + + bool to_string(std::string* str) const { + if (type == MEMBER_CODE) { + str->push_back(BEGIN_MEMBER); + str->append(leg_ptr, leg_len); + return true; + } else if (type == ARRAY_CODE) { + str->push_back(BEGIN_ARRAY); + std::string int_str = std::to_string(array_index); + str->append(int_str); + str->push_back(END_ARRAY); + return true; + } else { + return false; + } + } }; class JsonbPath { @@ -362,6 +378,19 @@ class JsonbPath { leg_vector.emplace_back(leg.release()); } + void pop_leg_from_leg_vector() { leg_vector.pop_back(); } + + bool to_string(std::string* res) const { + res->push_back(SCOPE); + for (const auto& leg : leg_vector) { + auto valid = leg->to_string(res); + if (!valid) { + return false; + } + } + return true; + } + size_t get_leg_vector_size() { return leg_vector.size(); } leg_info* get_leg_from_leg_vector(size_t i) { return leg_vector[i].get(); } diff --git a/be/src/util/obj_lru_cache.cpp b/be/src/util/obj_lru_cache.cpp index 05b8b8824b5448..600ffdb647ce44 100644 --- a/be/src/util/obj_lru_cache.cpp +++ b/be/src/util/obj_lru_cache.cpp @@ -20,9 +20,9 @@ namespace doris { ObjLRUCache::ObjLRUCache(int64_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingManual( - CachePolicy::CacheType::COMMON_OBJ_LRU_CACHE, capacity, LRUCacheType::NUMBER, - config::common_obj_lru_cache_stale_sweep_time_sec, num_shards) { + : LRUCachePolicy(CachePolicy::CacheType::COMMON_OBJ_LRU_CACHE, capacity, + LRUCacheType::NUMBER, config::common_obj_lru_cache_stale_sweep_time_sec, + num_shards) { _enabled = (capacity > 0); } diff --git a/be/src/util/obj_lru_cache.h b/be/src/util/obj_lru_cache.h index c7f805fc3a1de2..680a32e79bc991 100644 --- a/be/src/util/obj_lru_cache.h +++ b/be/src/util/obj_lru_cache.h @@ -25,9 +25,9 @@ namespace doris { // A common object cache depends on an Sharded LRU Cache. // It has a certain capacity, which determin how many objects it can cache. // Caller must hold a CacheHandle instance when visiting the cached object. 
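As the comment above says, callers hold a CacheHandle while they use a cached object, and the handle releases the underlying LRU entry when it goes out of scope. A generic sketch of that RAII wrapper; the Cache and Handle types here are simplified stand-ins, not the Doris LRU API:

#include <utility>

struct Handle {};

struct Cache {
    // Drops one reference on the entry; a real cache may free or evict it here.
    void release(Handle* handle) { delete handle; }
};

// RAII wrapper: holding it pins the entry, destruction releases it exactly once.
class CacheHandle {
public:
    CacheHandle() = default;
    CacheHandle(Cache* cache, Handle* handle) : _cache(cache), _handle(handle) {}
    CacheHandle(CacheHandle&& other) noexcept
            : _cache(std::exchange(other._cache, nullptr)),
              _handle(std::exchange(other._handle, nullptr)) {}
    CacheHandle& operator=(CacheHandle&& other) noexcept {
        std::swap(_cache, other._cache);  // the old entry is released by other's destructor
        std::swap(_handle, other._handle);
        return *this;
    }
    CacheHandle(const CacheHandle&) = delete;  // copying would double-release
    CacheHandle& operator=(const CacheHandle&) = delete;
    ~CacheHandle() {
        if (_cache != nullptr && _handle != nullptr) {
            _cache->release(_handle);
        }
    }

private:
    Cache* _cache = nullptr;
    Handle* _handle = nullptr;
};

int main() {
    Cache cache;
    CacheHandle h(&cache, new Handle());  // entry stays pinned while h is alive
    return 0;                             // released when h goes out of scope
}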
-class ObjLRUCache : public LRUCachePolicyTrackingManual { +class ObjLRUCache : public LRUCachePolicy { public: - using LRUCachePolicyTrackingManual::insert; + using LRUCachePolicy::insert; struct ObjKey { ObjKey(const std::string& key_) : key(key_) {} @@ -94,8 +94,8 @@ class ObjLRUCache : public LRUCachePolicyTrackingManual { if (_enabled) { const std::string& encoded_key = key.key; auto* obj_value = new ObjValue(value); - auto* handle = LRUCachePolicyTrackingManual::insert(encoded_key, obj_value, 1, - sizeof(T), CachePriority::NORMAL); + auto* handle = LRUCachePolicy::insert(encoded_key, obj_value, 1, sizeof(T), + CachePriority::NORMAL); *cache_handle = CacheHandle {this, handle}; } else { cache_handle = nullptr; diff --git a/be/src/util/ref_count_closure.h b/be/src/util/ref_count_closure.h index 01e523d9b9ad8c..92772a82373fec 100644 --- a/be/src/util/ref_count_closure.h +++ b/be/src/util/ref_count_closure.h @@ -23,6 +23,7 @@ #include "runtime/thread_context.h" #include "service/brpc.h" +#include "util/ref_count_closure.h" namespace doris { @@ -116,7 +117,10 @@ class AutoReleaseClosure : public google::protobuf::Closure { } virtual void _process_if_meet_error_status(const Status& status) { - LOG(WARNING) << "RPC meet error status: " << status; + // no need to log END_OF_FILE, to reduce useless logging + if (!status.is()) { + LOG(WARNING) << "RPC meet error status: " << status; + } } private: @@ -125,7 +129,7 @@ class AutoReleaseClosure : public google::protobuf::Closure { template void _process_status(Response* response) { - if (auto status = Status::create(response->status()); !status) { + if (Status status = Status::create(response->status()); !status.ok()) { _process_if_meet_error_status(status); } } diff --git a/be/src/util/stack_util.cpp b/be/src/util/stack_util.cpp index 20daea588732f2..d84c4eb4f216e6 100644 --- a/be/src/util/stack_util.cpp +++ b/be/src/util/stack_util.cpp @@ -36,13 +36,19 @@ void DumpStackTraceToString(std::string* stacktrace); namespace doris { std::string get_stack_trace(int start_pointers_index, std::string dwarf_location_info_mode) { +#ifndef BE_TEST if (!config::enable_stacktrace) { return "no enable stacktrace"; } +#endif if (dwarf_location_info_mode.empty()) { dwarf_location_info_mode = config::dwarf_location_info_mode; } +#ifdef BE_TEST + auto tool = std::string {"libunwind"}; +#else auto tool = config::get_stack_trace_tool; +#endif if (tool == "glog") { return get_stack_trace_by_glog(); } else if (tool == "boost") { diff --git a/be/src/util/tdigest.h b/be/src/util/tdigest.h index 0a8168fe8e382d..8314ebeb8a351c 100644 --- a/be/src/util/tdigest.h +++ b/be/src/util/tdigest.h @@ -42,6 +42,8 @@ #pragma once +#include + #include #include #include @@ -647,7 +649,10 @@ class TDigest { // when complete, _unprocessed will be empty and _processed will have at most _max_processed centroids void process() { CentroidComparator cc; - RadixSort::executeLSD(_unprocessed.data(), _unprocessed.size()); + // select percentile_approx(lo_orderkey,0.5) from lineorder; + // tested both pdqsort and RadixSort here: pdqsort performs better when the data is struct Centroid, + // but when sorting a plain type like int/float in a std::vector, RadixSort is better + pdqsort(_unprocessed.begin(), _unprocessed.end(), cc); auto count = _unprocessed.size(); _unprocessed.insert(_unprocessed.end(), _processed.cbegin(), _processed.cend()); std::inplace_merge(_unprocessed.begin(), _unprocessed.begin() + count, _unprocessed.end(), diff --git a/be/src/util/url_coding.cpp
b/be/src/util/url_coding.cpp index d0bbf5aae63fc4..5871b4b9d32b77 100644 --- a/be/src/util/url_coding.cpp +++ b/be/src/util/url_coding.cpp @@ -17,41 +17,33 @@ #include "util/url_coding.h" +#include #include -#include -#include #include namespace doris { -static inline void url_encode(const char* in, int in_len, std::string* out) { - (*out).reserve(in_len); - std::stringstream ss; - - for (int i = 0; i < in_len; ++i) { - const char ch = in[i]; - - // Escape the character iff a) we are in Hive-compat mode and the - // character is in the Hive whitelist or b) we are not in - // Hive-compat mode, and the character is not alphanumeric or one - // of the four commonly excluded characters. - ss << ch; - } - - (*out) = ss.str(); +inline unsigned char to_hex(unsigned char x) { + return x + (x > 9 ? ('A' - 10) : '0'); } -void url_encode(const std::vector& in, std::string* out) { - if (in.empty()) { - *out = ""; - } else { - url_encode(reinterpret_cast(&in[0]), in.size(), out); +// Adapted from http://dlib.net/dlib/server/server_http.cpp.html +void url_encode(const std::string_view& in, std::string* out) { + std::ostringstream os; + for (auto c : in) { + // impl as https://docs.oracle.com/javase/8/docs/api/java/net/URLEncoder.html + if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || + c == '.' || c == '-' || c == '*' || c == '_') { // allowed + os << c; + } else if (c == ' ') { + os << '+'; + } else { + os << '%' << to_hex(c >> 4) << to_hex(c % 16); + } } -} -void url_encode(const std::string& in, std::string* out) { - url_encode(in.c_str(), in.size(), out); + *out = os.str(); } // Adapted from diff --git a/be/src/util/url_coding.h b/be/src/util/url_coding.h index 1a9fb4943b147e..b7e5136ecadf72 100644 --- a/be/src/util/url_coding.h +++ b/be/src/util/url_coding.h @@ -17,27 +17,19 @@ #pragma once -#include - +#include #include #include #include -#include namespace doris { // Utility method to URL-encode a string (that is, replace special // characters with %). -// The optional parameter hive_compat controls whether we mimic Hive's -// behaviour when encoding a string, which is only to encode certain -// characters (excluding, e.g., ' ') -void url_encode(const std::string& in, std::string* out); +void url_encode(const std::string_view& in, std::string* out); // Utility method to decode a string that was URL-encoded. Returns // true unless the string could not be correctly decoded. -// The optional parameter hive_compat controls whether or not we treat -// the strings as encoded by Hive, which means selectively ignoring -// certain characters like ' '. bool url_decode(const std::string& in, std::string* out); void base64_encode(const std::string& in, std::string* out); diff --git a/be/src/vec/aggregate_functions/aggregate_function.h b/be/src/vec/aggregate_functions/aggregate_function.h index 12d629b42c89f8..05f1bd2a602c68 100644 --- a/be/src/vec/aggregate_functions/aggregate_function.h +++ b/be/src/vec/aggregate_functions/aggregate_function.h @@ -170,9 +170,6 @@ class IAggregateFunction { virtual void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, Arena* arena) const = 0; - /// Returns true if a function requires Arena to handle own states (see add(), merge(), deserialize()). - virtual bool allocates_memory_in_arena() const { return false; } - /// Inserts results into a column. 
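The rewritten url_encode keeps alphanumerics plus '.', '-', '*' and '_', turns spaces into '+', and percent-encodes every other byte, following the java.net.URLEncoder rules referenced in its comment. A self-contained restatement of those rules with a couple of expected outputs; this is an illustrative copy, not the Doris function itself:

#include <cassert>
#include <sstream>
#include <string>
#include <string_view>

static unsigned char to_hex_digit(unsigned char x) {
    return x + (x > 9 ? ('A' - 10) : '0');
}

std::string url_encode_sketch(std::string_view in) {
    std::ostringstream os;
    for (unsigned char c : in) {
        if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') ||
            c == '.' || c == '-' || c == '*' || c == '_') {
            os << c;    // allowed characters pass through unchanged
        } else if (c == ' ') {
            os << '+';  // space becomes '+', as URLEncoder does
        } else {
            os << '%' << to_hex_digit(c >> 4) << to_hex_digit(c % 16);
        }
    }
    return os.str();
}

int main() {
    assert(url_encode_sketch("a b&c") == "a+b%26c");
    assert(url_encode_sketch("file_name-1.txt") == "file_name-1.txt");
    return 0;
}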
virtual void insert_result_into(ConstAggregateDataPtr __restrict place, IColumn& to) const = 0; diff --git a/be/src/vec/aggregate_functions/aggregate_function_binary.h b/be/src/vec/aggregate_functions/aggregate_function_binary.h index a5b6e2b1e0e316..9fba9d11a1013a 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_binary.h +++ b/be/src/vec/aggregate_functions/aggregate_function_binary.h @@ -62,12 +62,12 @@ struct AggregateFunctionBinary String get_name() const override { return StatFunc::Data::name(); } + void reset(AggregateDataPtr __restrict place) const override { this->data(place).reset(); } + DataTypePtr get_return_type() const override { return std::make_shared>(); } - bool allocates_memory_in_arena() const override { return false; } - void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena*) const override { this->data(place).add( diff --git a/be/src/vec/aggregate_functions/aggregate_function_collect.cpp b/be/src/vec/aggregate_functions/aggregate_function_collect.cpp index 2831f39aa30eba..4fcf09b59b33c6 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_collect.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_collect.cpp @@ -38,15 +38,18 @@ AggregateFunctionPtr do_create_agg_function_collect(bool distinct, const DataTyp } } - if (distinct) { - return creator_without_type::create, HasLimit, std::false_type>>( - argument_types, result_is_nullable); - } else { - return creator_without_type::create, HasLimit, std::false_type>>( - argument_types, result_is_nullable); + if constexpr (!std::is_same_v) { + if (distinct) { + return creator_without_type::create, HasLimit, std::false_type>>( + argument_types, result_is_nullable); + } else { + return creator_without_type::create, HasLimit, std::false_type>>( + argument_types, result_is_nullable); + } } + return nullptr; } template @@ -69,15 +72,21 @@ AggregateFunctionPtr create_aggregate_function_collect_impl(const std::string& n if (which.is_date_or_datetime()) { return do_create_agg_function_collect(distinct, argument_types, result_is_nullable); - } else if (which.is_date_v2()) { + } else if (which.is_date_v2() || which.is_ipv4()) { return do_create_agg_function_collect(distinct, argument_types, result_is_nullable); - } else if (which.is_date_time_v2()) { + } else if (which.is_date_time_v2() || which.is_ipv6()) { return do_create_agg_function_collect(distinct, argument_types, result_is_nullable); } else if (which.is_string()) { return do_create_agg_function_collect( distinct, argument_types, result_is_nullable); + } else { + // generic serialize which will not use specializations, ShowNull::value always means array_agg + if constexpr (ShowNull::value) { + return do_create_agg_function_collect( + distinct, argument_types, result_is_nullable); + } } LOG(WARNING) << fmt::format("unsupported input type {} for aggregate function {}", @@ -107,6 +116,7 @@ AggregateFunctionPtr create_aggregate_function_collect(const std::string& name, } void register_aggregate_function_collect_list(AggregateFunctionSimpleFactory& factory) { + // notice: array_agg only differs from collect_list in that array_agg will show null elements in array factory.register_function_both("collect_list", create_aggregate_function_collect); factory.register_function_both("collect_set", create_aggregate_function_collect); factory.register_function_both("array_agg", create_aggregate_function_collect); diff --git a/be/src/vec/aggregate_functions/aggregate_function_collect.h 
b/be/src/vec/aggregate_functions/aggregate_function_collect.h index b99ecd959245e3..68de426ea1fdcf 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_collect.h +++ b/be/src/vec/aggregate_functions/aggregate_function_collect.h @@ -512,6 +512,71 @@ struct AggregateFunctionArrayAggData { } }; +template <> +struct AggregateFunctionArrayAggData { + using ElementType = StringRef; + using Self = AggregateFunctionArrayAggData; + MutableColumnPtr column_data; + + AggregateFunctionArrayAggData() {} + + AggregateFunctionArrayAggData(const DataTypes& argument_types) { + DataTypePtr column_type = argument_types[0]; + column_data = column_type->create_column(); + } + + void add(const IColumn& column, size_t row_num) { column_data->insert_from(column, row_num); } + + void deserialize_and_merge(const IColumn& column, size_t row_num) { + auto& to_arr = assert_cast(column); + auto& to_nested_col = to_arr.get_data(); + auto start = to_arr.get_offsets()[row_num - 1]; + auto end = start + to_arr.get_offsets()[row_num] - to_arr.get_offsets()[row_num - 1]; + for (auto i = start; i < end; ++i) { + column_data->insert_from(to_nested_col, i); + } + } + + void reset() { column_data->clear(); } + + void insert_result_into(IColumn& to) const { + auto& to_arr = assert_cast(to); + auto& to_nested_col = to_arr.get_data(); + size_t num_rows = column_data->size(); + for (size_t i = 0; i < num_rows; ++i) { + to_nested_col.insert_from(*column_data, i); + } + to_arr.get_offsets().push_back(to_nested_col.size()); + } + + void write(BufferWritable& buf) const { + const size_t size = column_data->size(); + write_binary(size, buf); + for (size_t i = 0; i < size; i++) { + write_string_binary(column_data->get_data_at(i), buf); + } + } + + void read(BufferReadable& buf) { + size_t size = 0; + read_binary(size, buf); + column_data->reserve(size); + + StringRef s; + for (size_t i = 0; i < size; i++) { + read_string_binary(s, buf); + column_data->insert_data(s.data, s.size); + } + } + + void merge(const Self& rhs) { + const auto size = rhs.column_data->size(); + for (size_t i = 0; i < size; i++) { + column_data->insert_from(*rhs.column_data, i); + } + } +}; + //ShowNull is just used to support array_agg because array_agg needs to display NULL //todo: Supports order by sorting for array_agg template @@ -546,7 +611,8 @@ class AggregateFunctionCollect void create(AggregateDataPtr __restrict place) const override { if constexpr (ShowNull::value) { - if constexpr (IsDecimalNumber) { + if constexpr (IsDecimalNumber || + std::is_same_v>) { new (place) Data(argument_types); } else { new (place) Data(); @@ -560,8 +626,6 @@ class AggregateFunctionCollect return std::make_shared(make_nullable(return_type)); } - bool allocates_memory_in_arena() const override { return ENABLE_ARENA; } - void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena* arena) const override { auto& data = this->data(place); @@ -719,13 +783,15 @@ class AggregateFunctionCollect for (size_t i = 0; i < num_rows; ++i) { col_null->get_null_map_data().push_back(col_src.get_null_map_data()[i]); - if constexpr (std::is_same_v) { + if constexpr (std::is_same_v>) { auto& vec = assert_cast( col_null->get_nested_column()); const auto& vec_src = assert_cast( col_src.get_nested_column()); vec.insert_from(vec_src, i); + } else if constexpr (std::is_same_v>) { + to_nested_col.insert_from(col_src.get_nested_column(), i); } else { using ColVecType = ColumnVectorOrDecimal; auto& vec = assert_cast( diff --git 
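// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// The new StringRef specialization of AggregateFunctionArrayAggData serializes its
// state as an element count followed by length-prefixed strings. The round-trip below
// models only that wire shape, using std::string and a raw byte buffer instead of
// Doris' BufferWritable/BufferReadable.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

static void write_strings(const std::vector<std::string>& in, std::string& buf) {
    uint64_t n = in.size();
    buf.append(reinterpret_cast<const char*>(&n), sizeof(n));         // element count
    for (const auto& s : in) {
        uint64_t len = s.size();
        buf.append(reinterpret_cast<const char*>(&len), sizeof(len)); // length prefix
        buf.append(s.data(), len);                                    // payload bytes
    }
}

static std::vector<std::string> read_strings(const std::string& buf) {
    std::vector<std::string> out;
    size_t pos = 0;
    uint64_t n = 0;
    std::memcpy(&n, buf.data() + pos, sizeof(n));
    pos += sizeof(n);
    out.reserve(n);
    for (uint64_t i = 0; i < n; ++i) {
        uint64_t len = 0;
        std::memcpy(&len, buf.data() + pos, sizeof(len));
        pos += sizeof(len);
        out.emplace_back(buf.data() + pos, len);
        pos += len;
    }
    return out;
}

int main() {
    std::string buf;
    write_strings({"a", "bc", ""}, buf);
    assert((read_strings(buf) == std::vector<std::string> {"a", "bc", ""}));
}
// -------------------------------------------------------------------------------------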
a/be/src/vec/aggregate_functions/aggregate_function_corr.cpp b/be/src/vec/aggregate_functions/aggregate_function_corr.cpp index 8237f588298064..a454afb45f22e0 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_corr.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_corr.cpp @@ -76,6 +76,15 @@ struct CorrMoment { } static String name() { return "corr"; } + + void reset() { + m0 = {}; + x1 = {}; + y1 = {}; + xy = {}; + x2 = {}; + y2 = {}; + } }; AggregateFunctionPtr create_aggregate_corr_function(const std::string& name, diff --git a/be/src/vec/aggregate_functions/aggregate_function_covar.cpp b/be/src/vec/aggregate_functions/aggregate_function_covar.cpp index 790d0270aa39e8..76a2881dd78280 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_covar.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_covar.cpp @@ -81,9 +81,11 @@ void register_aggregate_function_covar_pop(AggregateFunctionSimpleFactory& facto void register_aggregate_function_covar_samp_old(AggregateFunctionSimpleFactory& factory) { factory.register_alternative_function( - "covar_samp", create_aggregate_function_covariance_samp_old); - factory.register_alternative_function( - "covar_samp", create_aggregate_function_covariance_samp_old, NULLABLE); + "covar_samp", create_aggregate_function_covariance_samp_old, false, + AGG_FUNCTION_NULLABLE); + factory.register_alternative_function("covar_samp", + create_aggregate_function_covariance_samp_old, + true, AGG_FUNCTION_NULLABLE); } void register_aggregate_function_covar_samp(AggregateFunctionSimpleFactory& factory) { diff --git a/be/src/vec/aggregate_functions/aggregate_function_covar.h b/be/src/vec/aggregate_functions/aggregate_function_covar.h index 9b4b1b70c1fa7f..78a3eae5bcb4e9 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_covar.h +++ b/be/src/vec/aggregate_functions/aggregate_function_covar.h @@ -17,6 +17,8 @@ #pragma once +#include + #include "agent/be_exec_version_manager.h" #define POP true #define NOTPOP false @@ -138,13 +140,22 @@ struct PopData : Data { template struct SampData_OLDER : Data { void insert_result_into(IColumn& to) const { - ColumnNullable& nullable_column = assert_cast(to); - if (this->count == 1 || this->count == 0) { - nullable_column.insert_default(); + if (to.is_nullable()) { + ColumnNullable& nullable_column = assert_cast(to); + if (this->count == 1 || this->count == 0) { + nullable_column.insert_default(); + } else { + auto& col = assert_cast(nullable_column.get_nested_column()); + col.get_data().push_back(this->get_samp_result()); + nullable_column.get_null_map_data().push_back(0); + } } else { - auto& col = assert_cast(nullable_column.get_nested_column()); - col.get_data().push_back(this->get_samp_result()); - nullable_column.get_null_map_data().push_back(0); + auto& col = assert_cast(to); + if (this->count == 1 || this->count == 0) { + col.insert_default(); + } else { + col.get_data().push_back(this->get_samp_result()); + } } } static DataTypePtr get_return_type() { @@ -195,12 +206,30 @@ class AggregateFunctionSampCovariance this->data(place).add(columns[0], columns[1], row_num); } else { if constexpr (is_nullable) { //this if check could remove with old function + // nullable means at least one child is null. + // so here, maybe JUST ONE OF ups is null. so nullptr perhaps in ..._x or ..._y! 
const auto* nullable_column_x = check_and_get_column(columns[0]); const auto* nullable_column_y = check_and_get_column(columns[1]); - if (!nullable_column_x->is_null_at(row_num) && - !nullable_column_y->is_null_at(row_num)) { - this->data(place).add(&nullable_column_x->get_nested_column(), - &nullable_column_y->get_nested_column(), row_num); + + if (nullable_column_x && nullable_column_y) { // both nullable + if (!nullable_column_x->is_null_at(row_num) && + !nullable_column_y->is_null_at(row_num)) { + this->data(place).add(&nullable_column_x->get_nested_column(), + &nullable_column_y->get_nested_column(), row_num); + } + } else if (nullable_column_x) { // x nullable + if (!nullable_column_x->is_null_at(row_num)) { + this->data(place).add(&nullable_column_x->get_nested_column(), columns[1], + row_num); + } + } else if (nullable_column_y) { // y nullable + if (!nullable_column_y->is_null_at(row_num)) { + this->data(place).add(columns[0], &nullable_column_y->get_nested_column(), + row_num); + } + } else { + throw Exception(ErrorCode::INTERNAL_ERROR, + "Nullable function {} get non-nullable columns!", get_name()); } } else { this->data(place).add(columns[0], columns[1], row_num); diff --git a/be/src/vec/aggregate_functions/aggregate_function_distinct.h b/be/src/vec/aggregate_functions/aggregate_function_distinct.h index 6193b28a131e9f..ec6936a128c869 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_distinct.h +++ b/be/src/vec/aggregate_functions/aggregate_function_distinct.h @@ -338,8 +338,6 @@ class AggregateFunctionDistinct DataTypePtr get_return_type() const override { return nested_func->get_return_type(); } - bool allocates_memory_in_arena() const override { return true; } - AggregateFunctionPtr transmit_to_stable() override { return AggregateFunctionPtr(new AggregateFunctionDistinct( nested_func, IAggregateFunction::argument_types)); diff --git a/be/src/vec/aggregate_functions/aggregate_function_foreach.h b/be/src/vec/aggregate_functions/aggregate_function_foreach.h index 4261ef24343b95..7f746e53daac70 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_foreach.h +++ b/be/src/vec/aggregate_functions/aggregate_function_foreach.h @@ -219,10 +219,6 @@ class AggregateFunctionForEach : public IAggregateFunctionDataHelperallocates_memory_in_arena(); - } - void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena* arena) const override { std::vector nested(num_arguments); diff --git a/be/src/vec/aggregate_functions/aggregate_function_group_array_intersect.h b/be/src/vec/aggregate_functions/aggregate_function_group_array_intersect.h index d8cf91865f1ed2..94b34caff78645 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_group_array_intersect.h +++ b/be/src/vec/aggregate_functions/aggregate_function_group_array_intersect.h @@ -93,6 +93,11 @@ struct AggregateFunctionGroupArrayIntersectData { Set value; bool init = false; + void reset() { + init = false; + value = std::make_unique(); + } + void process_col_data(auto& column_data, size_t offset, size_t arr_size, bool& init, Set& set) { const bool is_column_data_nullable = column_data.is_nullable(); @@ -163,7 +168,7 @@ class AggregateFunctionGroupArrayIntersect DataTypePtr get_return_type() const override { return argument_type; } - bool allocates_memory_in_arena() const override { return false; } + void reset(AggregateDataPtr __restrict place) const override { this->data(place).reset(); } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena*) 
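// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// The covariance fix above boils down to: when the function is created through the
// nullable path, only at least one argument is guaranteed to arrive wrapped in
// ColumnNullable, so each argument has to be probed and unwrapped on its own. The
// simplified model below (stand-in types, not the real IColumn hierarchy) shows the
// per-argument probing and the "both non-NULL or skip the row" rule.
#include <cstddef>
#include <optional>

struct Col {
    bool is_nullable = false;
    bool null_at(std::size_t) const { return false; }
    double value_at(std::size_t) const { return 0.0; }
};

// Returns the value to aggregate, or std::nullopt when this argument is NULL.
static std::optional<double> probe(const Col& col, std::size_t row) {
    if (col.is_nullable && col.null_at(row)) {
        return std::nullopt;
    }
    return col.value_at(row);
}

static void add_row(double& sum_xy, const Col& x, const Col& y, std::size_t row) {
    auto vx = probe(x, row);
    auto vy = probe(y, row);
    if (vx && vy) {            // only rows where BOTH arguments are non-NULL contribute
        sum_xy += *vx * *vy;
    }
}

int main() {
    double sum_xy = 0;
    Col x {/*is_nullable=*/true};
    Col y {/*is_nullable=*/false};
    add_row(sum_xy, x, y, 0);  // x is nullable, y is plain; the row is probed per column
}
// -------------------------------------------------------------------------------------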
const override { @@ -331,6 +336,11 @@ struct AggregateFunctionGroupArrayIntersectGenericData { : value(std::make_unique()) {} Set value; bool init = false; + + void reset() { + init = false; + value = std::make_unique(); + } }; /** Template parameter with true value should be used for columns that store their elements in memory continuously. @@ -357,7 +367,7 @@ class AggregateFunctionGroupArrayIntersectGeneric DataTypePtr get_return_type() const override { return input_data_type; } - bool allocates_memory_in_arena() const override { return true; } + void reset(AggregateDataPtr __restrict place) const override { this->data(place).reset(); } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena* arena) const override { diff --git a/be/src/vec/aggregate_functions/aggregate_function_null.h b/be/src/vec/aggregate_functions/aggregate_function_null.h index 382fb8f7a5310e..014a3e9c603b88 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_null.h +++ b/be/src/vec/aggregate_functions/aggregate_function_null.h @@ -74,6 +74,7 @@ class AggregateFunctionNullBaseInline : public IAggregateFunctionHelper const DataTypes& arguments) : IAggregateFunctionHelper(arguments), nested_function {assert_cast(nested_function_)} { + DCHECK(nested_function_ != nullptr); if (result_is_nullable) { prefix_size = nested_function->align_of_data(); } else { @@ -176,10 +177,6 @@ class AggregateFunctionNullBaseInline : public IAggregateFunctionHelper nested_function->insert_result_into(nested_place(place), to); } } - - bool allocates_memory_in_arena() const override { - return nested_function->allocates_memory_in_arena(); - } }; /** There are two cases: for single argument and variadic. @@ -328,10 +325,6 @@ class AggregateFunctionNullVariadicInline final arena); } - bool allocates_memory_in_arena() const override { - return this->nested_function->allocates_memory_in_arena(); - } - private: // The array length is fixed in the implementation of some aggregate functions. // Therefore we choose 256 as the appropriate maximum length limit. diff --git a/be/src/vec/aggregate_functions/aggregate_function_orthogonal_bitmap.h b/be/src/vec/aggregate_functions/aggregate_function_orthogonal_bitmap.h index deb53241abb92f..a81bdcddaa3310 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_orthogonal_bitmap.h +++ b/be/src/vec/aggregate_functions/aggregate_function_orthogonal_bitmap.h @@ -52,6 +52,11 @@ struct AggOrthBitmapBaseData { public: using ColVecData = std::conditional_t, ColumnVector, ColumnString>; + void reset() { + bitmap = {}; + first_init = true; + } + void add(const IColumn** columns, size_t row_num) { const auto& bitmap_col = assert_cast(*columns[0]); @@ -99,6 +104,11 @@ struct AggOrthBitMapIntersect : public AggOrthBitmapBaseData { static DataTypePtr get_return_type() { return std::make_shared(); } + void reset() { + AggOrthBitmapBaseData::reset(); + result.reset(); + } + void merge(const AggOrthBitMapIntersect& rhs) { if (rhs.first_init) { return; @@ -120,7 +130,8 @@ struct AggOrthBitMapIntersect : public AggOrthBitmapBaseData { void get(IColumn& to) const { auto& column = assert_cast(to); - column.get_data().emplace_back(result); + column.get_data().emplace_back(result.empty() ? 
AggOrthBitmapBaseData::bitmap.intersect() + : result); } private: @@ -170,6 +181,11 @@ struct AggOrthBitMapIntersectCount : public AggOrthBitmapBaseData { static DataTypePtr get_return_type() { return std::make_shared(); } + void reset() { + AggOrthBitmapBaseData::reset(); + result = 0; + } + void merge(const AggOrthBitMapIntersectCount& rhs) { if (rhs.first_init) { return; @@ -225,6 +241,11 @@ struct AggOrthBitmapExprCalBaseData { } } + void reset() { + bitmap_expr_cal = {}; + first_init = true; + } + protected: doris::BitmapExprCalculation bitmap_expr_cal; bool first_init = true; @@ -263,6 +284,11 @@ struct AggOrthBitMapExprCal : public AggOrthBitmapExprCalBaseData { ->bitmap_expr_cal.bitmap_calculate()); } + void reset() { + AggOrthBitmapExprCalBaseData::reset(); + result.reset(); + } + private: BitmapValue result; }; @@ -299,6 +325,11 @@ struct AggOrthBitMapExprCalCount : public AggOrthBitmapExprCalBaseData { ->bitmap_expr_cal.bitmap_calculate_count()); } + void reset() { + AggOrthBitmapExprCalBaseData::reset(); + result = 0; + } + private: int64_t result = 0; }; @@ -330,6 +361,11 @@ struct OrthBitmapUnionCountData { column.get_data().emplace_back(result ? result : value.cardinality()); } + void reset() { + value.reset(); + result = 0; + } + private: BitmapValue value; int64_t result = 0; @@ -347,6 +383,8 @@ class AggFunctionOrthBitmapFunc final DataTypePtr get_return_type() const override { return Impl::get_return_type(); } + void reset(AggregateDataPtr __restrict place) const override { this->data(place).reset(); } + void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena*) const override { this->data(place).init_add_key(columns, row_num, _argument_size); diff --git a/be/src/vec/aggregate_functions/aggregate_function_percentile.cpp b/be/src/vec/aggregate_functions/aggregate_function_percentile.cpp index a8767e6fae7a20..b0da562bd73b6c 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_percentile.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_percentile.cpp @@ -33,8 +33,8 @@ AggregateFunctionPtr create_aggregate_function_percentile_approx_older( } if (argument_types.size() == 2) { return creator_without_type::create< - AggregateFunctionPercentileApproxTwoParams_OLDER>((argument_types), - result_is_nullable); + AggregateFunctionPercentileApproxTwoParams_OLDER>( + remove_nullable(argument_types), result_is_nullable); } if (argument_types.size() == 3) { return creator_without_type::create< @@ -111,16 +111,20 @@ void register_aggregate_function_percentile(AggregateFunctionSimpleFactory& fact } void register_percentile_approx_old_function(AggregateFunctionSimpleFactory& factory) { - factory.register_alternative_function( - "percentile_approx", create_aggregate_function_percentile_approx_older, false); - factory.register_alternative_function( - "percentile_approx", create_aggregate_function_percentile_approx_older, true); + factory.register_alternative_function("percentile_approx", + create_aggregate_function_percentile_approx_older, + false, AGG_FUNCTION_NULLABLE); + factory.register_alternative_function("percentile_approx", + create_aggregate_function_percentile_approx_older, + true, AGG_FUNCTION_NULLABLE); factory.register_alternative_function( "percentile_approx_weighted", - create_aggregate_function_percentile_approx_weighted_older, false); + create_aggregate_function_percentile_approx_weighted_older, false, + AGG_FUNCTION_NULLABLE); factory.register_alternative_function( "percentile_approx_weighted", - 
create_aggregate_function_percentile_approx_weighted_older, true); + create_aggregate_function_percentile_approx_weighted_older, true, + AGG_FUNCTION_NULLABLE); } void register_aggregate_function_percentile_approx(AggregateFunctionSimpleFactory& factory) { diff --git a/be/src/vec/aggregate_functions/aggregate_function_percentile.h b/be/src/vec/aggregate_functions/aggregate_function_percentile.h index cd328a85f3485a..1c8a12340d7096 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_percentile.h +++ b/be/src/vec/aggregate_functions/aggregate_function_percentile.h @@ -583,11 +583,14 @@ class AggregateFunctionPercentileApproxWeightedFourParams template struct PercentileState { mutable std::vector> vec_counts; - std::vector vec_quantile; + std::vector vec_quantile {-1}; bool inited_flag = false; void write(BufferWritable& buf) const { write_binary(inited_flag, buf); + if (!inited_flag) { + return; + } int size_num = vec_quantile.size(); write_binary(size_num, buf); for (const auto& quantile : vec_quantile) { @@ -600,6 +603,9 @@ struct PercentileState { void read(BufferReadable& buf) { read_binary(inited_flag, buf); + if (!inited_flag) { + return; + } int size_num = 0; read_binary(size_num, buf); double data = 0.0; diff --git a/be/src/vec/aggregate_functions/aggregate_function_percentile_approx.cpp b/be/src/vec/aggregate_functions/aggregate_function_percentile_approx.cpp index 01fdddf6074ce6..5ad1ea8f2d3d70 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_percentile_approx.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_percentile_approx.cpp @@ -24,13 +24,16 @@ namespace doris::vectorized { void register_aggregate_function_percentile_old(AggregateFunctionSimpleFactory& factory) { factory.register_alternative_function( - "percentile", creator_without_type::creator); + "percentile", creator_without_type::creator, false, + AGG_FUNCTION_NULLABLE); factory.register_alternative_function( - "percentile", creator_without_type::creator, true); + "percentile", creator_without_type::creator, true, + AGG_FUNCTION_NULLABLE); factory.register_alternative_function( - "percentile_array", creator_without_type::creator); + "percentile_array", creator_without_type::creator, + false, AGG_FUNCTION_NULLABLE); factory.register_alternative_function( "percentile_array", creator_without_type::creator, - true); + true, AGG_FUNCTION_NULLABLE); } } // namespace doris::vectorized \ No newline at end of file diff --git a/be/src/vec/aggregate_functions/aggregate_function_simple_factory.h b/be/src/vec/aggregate_functions/aggregate_function_simple_factory.h index cc504b9f99609d..b22504dda9ca05 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_simple_factory.h +++ b/be/src/vec/aggregate_functions/aggregate_function_simple_factory.h @@ -59,11 +59,6 @@ class AggregateFunctionSimpleFactory { AggregateFunctions aggregate_functions; AggregateFunctions nullable_aggregate_functions; std::unordered_map function_alias; - /// @TEMPORARY: for be_exec_version=4 - /// in order to solve agg of sum/count is not compatibility during the upgrade process - constexpr static int AGG_FUNCTION_NEW = 7; - /// @TEMPORARY: for be_exec_version < AGG_FUNCTION_NEW. replace function to old version. 
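// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// The PercentileState change makes write() and read() symmetric: when the state was
// never initialised the writer emits only the flag, so the reader must also stop after
// the flag instead of pulling a size and payload that were never written. A minimal
// model of that invariant with std::stringstream in place of BufferWritable/Readable:
#include <cassert>
#include <sstream>
#include <vector>

struct SketchState {
    bool inited = false;
    std::vector<double> quantiles;

    void write(std::ostream& out) const {
        out.write(reinterpret_cast<const char*>(&inited), sizeof(inited));
        if (!inited) {
            return;                                   // nothing else was ever filled in
        }
        size_t n = quantiles.size();
        out.write(reinterpret_cast<const char*>(&n), sizeof(n));
        out.write(reinterpret_cast<const char*>(quantiles.data()), n * sizeof(double));
    }

    void read(std::istream& in) {
        in.read(reinterpret_cast<char*>(&inited), sizeof(inited));
        if (!inited) {
            return;                                   // mirror the writer's early return
        }
        size_t n = 0;
        in.read(reinterpret_cast<char*>(&n), sizeof(n));
        quantiles.resize(n);
        in.read(reinterpret_cast<char*>(quantiles.data()), n * sizeof(double));
    }
};

int main() {
    SketchState empty, restored;
    std::stringstream buf;
    empty.write(buf);
    restored.read(buf);
    assert(!restored.inited && restored.quantiles.empty());
}
// -------------------------------------------------------------------------------------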
- std::unordered_map function_to_replace; public: void register_nullable_function_combinator(const Creator& creator) { @@ -177,21 +172,19 @@ class AggregateFunctionSimpleFactory { } } - /// @TEMPORARY: for be_exec_version < AGG_FUNCTION_NEW void register_alternative_function(const std::string& name, const Creator& creator, - bool nullable = false) { - static std::string suffix {"_old_for_version_before_2_0"}; - register_function(name + suffix, creator, nullable); - function_to_replace[name] = name + suffix; + bool nullable, int old_be_exec_version) { + auto new_name = name + BeExecVersionManager::get_function_suffix(old_be_exec_version); + register_function(new_name, creator, nullable); + BeExecVersionManager::registe_old_function_compatibility(old_be_exec_version, name); } - /// @TEMPORARY: for be_exec_version < AGG_FUNCTION_NEW void temporary_function_update(int fe_version_now, std::string& name) { - // replace if fe is old version. - if (fe_version_now < AGG_FUNCTION_NEW && - function_to_replace.find(name) != function_to_replace.end()) { - name = function_to_replace[name]; + int old_version = BeExecVersionManager::get_function_compatibility(fe_version_now, name); + if (!old_version) { + return; } + name = name + BeExecVersionManager::get_function_suffix(old_version); } static AggregateFunctionSimpleFactory& instance(); diff --git a/be/src/vec/aggregate_functions/aggregate_function_stddev.cpp b/be/src/vec/aggregate_functions/aggregate_function_stddev.cpp index 1d977c1c5285f4..b9e39552395ffa 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_stddev.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_stddev.cpp @@ -109,13 +109,17 @@ void register_aggregate_function_stddev_variance_pop(AggregateFunctionSimpleFact void register_aggregate_function_stddev_variance_samp_old(AggregateFunctionSimpleFactory& factory) { factory.register_alternative_function( - "variance_samp", create_aggregate_function_variance_samp_older); + "variance_samp", create_aggregate_function_variance_samp_older, false, + AGG_FUNCTION_NULLABLE); factory.register_alternative_function( - "variance_samp", create_aggregate_function_variance_samp_older, true); + "variance_samp", create_aggregate_function_variance_samp_older, true, + AGG_FUNCTION_NULLABLE); factory.register_alternative_function("stddev_samp", - create_aggregate_function_stddev_samp_older); - factory.register_alternative_function( - "stddev_samp", create_aggregate_function_stddev_samp_older, true); + create_aggregate_function_stddev_samp_older, + false, AGG_FUNCTION_NULLABLE); + factory.register_alternative_function("stddev_samp", + create_aggregate_function_stddev_samp_older, + true, AGG_FUNCTION_NULLABLE); } void register_aggregate_function_stddev_variance_samp(AggregateFunctionSimpleFactory& factory) { diff --git a/be/src/vec/aggregate_functions/aggregate_function_uniq.h b/be/src/vec/aggregate_functions/aggregate_function_uniq.h index 356e0ead2d3d56..e97923a08e6a2d 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_uniq.h +++ b/be/src/vec/aggregate_functions/aggregate_function_uniq.h @@ -76,6 +76,8 @@ struct AggregateFunctionUniqExactData { Set set; static String get_name() { return "multi_distinct"; } + + void reset() { set.clear(); } }; namespace detail { @@ -115,6 +117,8 @@ class AggregateFunctionUniq final DataTypePtr get_return_type() const override { return std::make_shared(); } + void reset(AggregateDataPtr __restrict place) const override { this->data(place).reset(); } + void add(AggregateDataPtr __restrict place, const 
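// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// A condensed model of how the factory now resolves old-version aggregate functions:
// each breaking change registers an alternative creator under the plain name plus a
// per-version suffix (the real suffix comes from BeExecVersionManager::
// get_function_suffix; the "_compat_v" string below is illustrative) and records the
// breaking version. temporary_function_update then rewrites the requested name only
// when a non-zero old version applies. This sketch assumes the lookup picks the
// smallest recorded breaking version at or above the requested be_exec_version.
#include <cassert>
#include <map>
#include <set>
#include <string>

static std::map<std::string, std::set<int>> change_map;   // name -> breaking versions

static void register_old_function(const std::string& name, int breaking_old_version) {
    change_map[name].insert(breaking_old_version);
}

static int old_version_for(const std::string& name, int be_exec_version) {
    auto it = change_map.find(name);
    if (it == change_map.end()) {
        return 0;                                          // 0 => no handling needed
    }
    auto ver = it->second.lower_bound(be_exec_version);
    return ver == it->second.end() ? 0 : *ver;
}

static std::string resolve(const std::string& name, int be_exec_version) {
    int v = old_version_for(name, be_exec_version);
    return v ? name + "_compat_v" + std::to_string(v) : name;  // illustrative suffix
}

int main() {
    register_old_function("window_funnel", 6);             // incompatible change after v6
    assert(resolve("window_funnel", 5) == "window_funnel_compat_v6"); // old-data path
    assert(resolve("window_funnel", 7) == "window_funnel");           // new enough
    assert(resolve("sum", 5) == "sum");                     // unaffected names unchanged
}
// -------------------------------------------------------------------------------------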
IColumn** columns, ssize_t row_num, Arena*) const override { detail::OneAdder::add(this->data(place), *columns[0], row_num); diff --git a/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h b/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h index 3eaa6418f0b7ca..4c3fa67e1626ae 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h +++ b/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h @@ -68,6 +68,11 @@ struct AggregateFunctionUniqDistributeKeyData { Set set; UInt64 count = 0; + + void reset() { + set.clear(); + count = 0; + } }; template @@ -83,6 +88,8 @@ class AggregateFunctionUniqDistributeKey final DataTypePtr get_return_type() const override { return std::make_shared(); } + void reset(AggregateDataPtr __restrict place) const override { this->data(place).reset(); } + void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, Arena*) const override { detail::OneAdder::add(this->data(place), *columns[0], row_num); diff --git a/be/src/vec/aggregate_functions/aggregate_function_window.h b/be/src/vec/aggregate_functions/aggregate_function_window.h index ec1aab99e6a5fe..92e22c895c4ab8 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_window.h +++ b/be/src/vec/aggregate_functions/aggregate_function_window.h @@ -499,9 +499,9 @@ template struct WindowFunctionLastImpl : Data { void add_range_single_place(int64_t partition_start, int64_t partition_end, int64_t frame_start, int64_t frame_end, const IColumn** columns) { - if ((frame_start <= frame_end) && - ((frame_end <= partition_start) || - (frame_start >= partition_end))) { //beyond or under partition, set null + DCHECK_LE(frame_start, frame_end); + if ((frame_end <= partition_start) || + (frame_start >= partition_end)) { //beyond or under partition, set null this->set_is_null(); return; } diff --git a/be/src/vec/aggregate_functions/aggregate_function_window_funnel.cpp b/be/src/vec/aggregate_functions/aggregate_function_window_funnel.cpp index 8bfdcc26f4310b..598c23eb147bf4 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_window_funnel.cpp +++ b/be/src/vec/aggregate_functions/aggregate_function_window_funnel.cpp @@ -77,8 +77,10 @@ void register_aggregate_function_window_funnel(AggregateFunctionSimpleFactory& f } void register_aggregate_function_window_funnel_old(AggregateFunctionSimpleFactory& factory) { factory.register_alternative_function("window_funnel", - create_aggregate_function_window_funnel_old, true); + create_aggregate_function_window_funnel_old, true, + AGG_FUNCTION_NEW_WINDOW_FUNNEL); factory.register_alternative_function("window_funnel", - create_aggregate_function_window_funnel_old, false); + create_aggregate_function_window_funnel_old, false, + AGG_FUNCTION_NEW_WINDOW_FUNNEL); } } // namespace doris::vectorized diff --git a/be/src/vec/aggregate_functions/aggregate_function_window_funnel.h b/be/src/vec/aggregate_functions/aggregate_function_window_funnel.h index 1f6a6e74968f42..c0f0a4e7e20ad0 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_window_funnel.h +++ b/be/src/vec/aggregate_functions/aggregate_function_window_funnel.h @@ -84,10 +84,9 @@ struct WindowFunnelState { bool enable_mode; WindowFunnelMode window_funnel_mode; mutable vectorized::MutableBlock mutable_block; - ColumnVector::Container* timestamp_column_data; + ColumnVector::Container* timestamp_column_data = nullptr; std::vector::Container*> event_columns_datas; SortDescription sort_description {1}; - 
bool sorted; WindowFunnelState() { event_count = 0; @@ -97,20 +96,15 @@ struct WindowFunnelState { sort_description[0].column_number = 0; sort_description[0].direction = 1; sort_description[0].nulls_direction = -1; - sorted = false; } WindowFunnelState(int arg_event_count) : WindowFunnelState() { event_count = arg_event_count; + event_columns_datas.resize(event_count); auto timestamp_column = ColumnVector::create(); - timestamp_column_data = - &assert_cast&>(*timestamp_column).get_data(); MutableColumns event_columns; for (int i = 0; i < event_count; i++) { - auto event_column = ColumnVector::create(); - event_columns_datas.emplace_back( - &assert_cast&>(*event_column).get_data()); - event_columns.emplace_back(std::move(event_column)); + event_columns.emplace_back(ColumnVector::create()); } Block tmp_block; tmp_block.insert({std::move(timestamp_column), @@ -122,15 +116,18 @@ struct WindowFunnelState { } mutable_block = MutableBlock(std::move(tmp_block)); + _reset_columns_ptr(); } - void reset() { - window = 0; - mutable_block.clear(); - timestamp_column_data = nullptr; - event_columns_datas.clear(); - sorted = false; + void _reset_columns_ptr() { + auto& ts_column = mutable_block.get_column_by_position(0); + timestamp_column_data = &assert_cast&>(*ts_column).get_data(); + for (int i = 0; i != event_count; i++) { + auto& event_column = mutable_block.get_column_by_position(i + 1); + event_columns_datas[i] = &assert_cast&>(*event_column).get_data(); + } } + void reset() { mutable_block.clear_column_data(); } void add(const IColumn** arg_columns, ssize_t row_num, int64_t win, WindowFunnelMode mode) { window = win; @@ -146,26 +143,23 @@ struct WindowFunnelState { } void sort() { - if (sorted) { - return; - } - Block tmp_block = mutable_block.to_block(); auto block = tmp_block.clone_without_columns(); sort_block(tmp_block, block, sort_description, 0); - mutable_block = MutableBlock(std::move(block)); - sorted = true; + mutable_block = std::move(block); + _reset_columns_ptr(); } template - int _match_event_list(size_t& start_row, size_t row_count, - const NativeType* timestamp_data) const { + int _match_event_list(size_t& start_row, size_t row_count) const { int matched_count = 0; DateValueType start_timestamp; DateValueType end_timestamp; TimeInterval interval(SECOND, window, false); int column_idx = 1; + + const NativeType* timestamp_data = timestamp_column_data->data(); const auto& first_event_column = mutable_block.get_column_by_position(column_idx); const auto& first_event_data = assert_cast&>(*first_event_column).get_data(); @@ -180,12 +174,13 @@ struct WindowFunnelState { column_idx++; auto last_match_row = match_row; - for (; column_idx < event_count + 1; column_idx++) { + ++match_row; + for (; column_idx < event_count + 1 && match_row < row_count; + column_idx++, match_row++) { const auto& event_column = mutable_block.get_column_by_position(column_idx); const auto& event_data = assert_cast&>(*event_column).get_data(); if constexpr (WINDOW_FUNNEL_MODE == WindowFunnelMode::FIXED) { - ++match_row; if (event_data[match_row] == 1) { auto current_timestamp = binary_cast(timestamp_data[match_row]); @@ -196,7 +191,7 @@ struct WindowFunnelState { } break; } - match_row = simd::find_one(event_data.data(), match_row + 1, row_count); + match_row = simd::find_one(event_data.data(), match_row, row_count); if (match_row < row_count) { auto current_timestamp = binary_cast(timestamp_data[match_row]); @@ -249,14 +244,9 @@ struct WindowFunnelState { int _get_internal() const { size_t start_row = 0; int 
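// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// WindowFunnelState caches raw container pointers into the columns owned by
// mutable_block, so every operation that rebuilds or moves the block (construction,
// sort(), the serialize round-trip) has to re-derive those pointers afterwards; that
// is what the new _reset_columns_ptr() is for. The same hazard with std::vector:
#include <cassert>
#include <utility>
#include <vector>

struct SketchState {
    std::vector<std::vector<int>> block;   // stand-in for the mutable block
    std::vector<int>* ts = nullptr;        // cached pointer into block[0]

    explicit SketchState(size_t columns) : block(columns) { reset_ptrs(); }

    void reset_ptrs() { ts = &block[0]; }  // re-derive cached pointers from the block

    void replace_block(std::vector<std::vector<int>>&& other) {
        block = std::move(other);          // the old buffers are gone, `ts` dangles...
        reset_ptrs();                      // ...so it must be refreshed immediately
    }
};

int main() {
    SketchState s(2);
    s.ts->push_back(42);
    s.replace_block({{1, 2, 3}, {}});
    assert(s.ts->size() == 3);             // the pointer now follows the new block
}
// -------------------------------------------------------------------------------------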
max_found_event_count = 0; - const auto& ts_column = mutable_block.get_column_by_position(0)->get_ptr(); - const auto& timestamp_data = - assert_cast&>(*ts_column).get_data().data(); - auto row_count = mutable_block.rows(); while (start_row < row_count) { - auto found_event_count = - _match_event_list(start_row, row_count, timestamp_data); + auto found_event_count = _match_event_list(start_row, row_count); if (found_event_count == event_count) { return found_event_count; } @@ -323,6 +313,7 @@ struct WindowFunnelState { status = block.serialize( 5, &pblock, &uncompressed_bytes, &compressed_bytes, segment_v2::CompressionTypePB::ZSTD); // ZSTD for better compression ratio + block.clear_column_data(); if (!status.ok()) { throw doris::Exception(ErrorCode::INTERNAL_ERROR, status.to_string()); return; @@ -335,6 +326,9 @@ struct WindowFunnelState { auto data_bytes = buff.size(); write_var_uint(data_bytes, out); out.write(buff.data(), data_bytes); + + mutable_block = std::move(block); + const_cast*>(this)->_reset_columns_ptr(); } void read(BufferReadable& in) { @@ -365,6 +359,7 @@ struct WindowFunnelState { throw doris::Exception(ErrorCode::INTERNAL_ERROR, status.to_string()); } mutable_block = MutableBlock(std::move(block)); + _reset_columns_ptr(); } }; diff --git a/be/src/vec/columns/column_decimal.cpp b/be/src/vec/columns/column_decimal.cpp index beeb6224c2253c..2e5fc5e136a508 100644 --- a/be/src/vec/columns/column_decimal.cpp +++ b/be/src/vec/columns/column_decimal.cpp @@ -23,8 +23,6 @@ #include #include -#include -#include #include "olap/decimal12.h" #include "runtime/decimalv2_value.h" diff --git a/be/src/vec/columns/column_nothing.h b/be/src/vec/columns/column_nothing.h index 8a10eec8b6f36d..79cb779c7d16d2 100644 --- a/be/src/vec/columns/column_nothing.h +++ b/be/src/vec/columns/column_nothing.h @@ -32,6 +32,13 @@ class ColumnNothing final : public COWHelper { ColumnNothing(const ColumnNothing&) = default; + ColumnPtr permute(const Permutation& perm, size_t limit) const override { + return clone_dummy(limit ? std::min(s, limit) : s); + } + Field operator[](size_t) const override { return {}; } + void get(size_t, Field& f) const override { f = {}; } + void insert(const Field&) override { ++s; } + public: const char* get_family_name() const override { return "Nothing"; } MutableColumnPtr clone_dummy(size_t s_) const override { return ColumnNothing::create(s_); } diff --git a/be/src/vec/columns/column_nullable.cpp b/be/src/vec/columns/column_nullable.cpp index 483ed5ca6cd59f..dbee5a2025aa70 100644 --- a/be/src/vec/columns/column_nullable.cpp +++ b/be/src/vec/columns/column_nullable.cpp @@ -31,18 +31,19 @@ namespace doris::vectorized { ColumnNullable::ColumnNullable(MutableColumnPtr&& nested_column_, MutableColumnPtr&& null_map_) - : nested_column(std::move(nested_column_)), null_map(std::move(null_map_)) { + : NullMapProvider(std::move(null_map_)), nested_column(std::move(nested_column_)) { /// ColumnNullable cannot have constant nested column. But constant argument could be passed. Materialize it. 
nested_column = get_nested_column().convert_to_full_column_if_const(); // after convert const column to full column, it may be a nullable column if (nested_column->is_nullable()) { - assert_cast(*nested_column).apply_null_map((const ColumnUInt8&)*null_map); - null_map = assert_cast(*nested_column).get_null_map_column_ptr(); + assert_cast(*nested_column) + .apply_null_map(static_cast(get_null_map_column())); + reset_null_map(assert_cast(*nested_column).get_null_map_column_ptr()); nested_column = assert_cast(*nested_column).get_nested_column_ptr(); } - if (is_column_const(*null_map)) { + if (is_column_const(get_null_map_column())) [[unlikely]] { throw doris::Exception(ErrorCode::INTERNAL_ERROR, "ColumnNullable cannot have constant null map"); __builtin_unreachable(); @@ -69,7 +70,7 @@ void ColumnNullable::update_xxHash_with_value(size_t start, size_t end, uint64_t nested_column->update_xxHash_with_value(start, end, hash, nullptr); } else { const auto* __restrict real_null_data = - assert_cast(*null_map).get_data().data(); + assert_cast(get_null_map_column()).get_data().data(); for (int i = start; i < end; ++i) { if (real_null_data[i] != 0) { hash = HashUtil::xxHash64NullWithSeed(hash); @@ -85,7 +86,7 @@ void ColumnNullable::update_crc_with_value(size_t start, size_t end, uint32_t& h nested_column->update_crc_with_value(start, end, hash, nullptr); } else { const auto* __restrict real_null_data = - assert_cast(*null_map).get_data().data(); + assert_cast(get_null_map_column()).get_data().data(); for (int i = start; i < end; ++i) { if (real_null_data[i] != 0) { hash = HashUtil::zlib_crc_hash_null(hash); @@ -110,7 +111,7 @@ void ColumnNullable::update_crcs_with_value(uint32_t* __restrict hashes, doris:: auto s = rows; DCHECK(s == size()); const auto* __restrict real_null_data = - assert_cast(*null_map).get_data().data(); + assert_cast(get_null_map_column()).get_data().data(); if (!has_null()) { nested_column->update_crcs_with_value(hashes, type, rows, offset, nullptr); } else { @@ -128,7 +129,7 @@ void ColumnNullable::update_hashes_with_value(uint64_t* __restrict hashes, DCHECK(null_data == nullptr); auto s = size(); const auto* __restrict real_null_data = - assert_cast(*null_map).get_data().data(); + assert_cast(get_null_map_column()).get_data().data(); if (!has_null()) { nested_column->update_hashes_with_value(hashes, nullptr); } else { @@ -183,24 +184,24 @@ StringRef ColumnNullable::get_data_at(size_t n) const { void ColumnNullable::insert_data(const char* pos, size_t length) { if (pos == nullptr) { get_nested_column().insert_default(); - _get_null_map_data().push_back(1); + get_null_map_data().push_back(1); _has_null = true; + _need_update_has_null = false; } else { get_nested_column().insert_data(pos, length); - _get_null_map_data().push_back(0); + _push_false_to_nullmap(1); } } void ColumnNullable::insert_many_strings(const StringRef* strings, size_t num) { - auto& null_map_data = _get_null_map_data(); for (size_t i = 0; i != num; ++i) { if (strings[i].data == nullptr) { nested_column->insert_default(); - null_map_data.push_back(1); + get_null_map_data().push_back(1); _has_null = true; } else { nested_column->insert_data(strings[i].data, strings[i].size); - null_map_data.push_back(0); + _push_false_to_nullmap(1); } } } @@ -227,13 +228,14 @@ const char* ColumnNullable::deserialize_and_insert_from_arena(const char* pos) { UInt8 val = *reinterpret_cast(pos); pos += sizeof(val); - _get_null_map_data().push_back(val); + get_null_map_data().push_back(val); if (val == 0) { pos = 
get_nested_column().deserialize_and_insert_from_arena(pos); } else { get_nested_column().insert_default(); _has_null = true; + _need_update_has_null = false; } return pos; @@ -251,7 +253,7 @@ void ColumnNullable::serialize_vec(std::vector& keys, size_t num_rows } void ColumnNullable::deserialize_vec(std::vector& keys, const size_t num_rows) { - auto& arr = _get_null_map_data(); + auto& arr = get_null_map_data(); const size_t old_size = arr.size(); arr.resize(old_size + num_rows); @@ -274,21 +276,15 @@ void ColumnNullable::deserialize_vec(std::vector& keys, const size_t void ColumnNullable::insert_range_from_ignore_overflow(const doris::vectorized::IColumn& src, size_t start, size_t length) { const auto& nullable_col = assert_cast(src); - _get_null_map_column().insert_range_from(*nullable_col.null_map, start, length); + get_null_map_column().insert_range_from(nullable_col.get_null_map_column(), start, length); get_nested_column().insert_range_from_ignore_overflow(*nullable_col.nested_column, start, length); - const auto& src_null_map_data = nullable_col.get_null_map_data(); - _has_null = has_null(); - _has_null |= simd::contain_byte(src_null_map_data.data() + start, length, 1); } void ColumnNullable::insert_range_from(const IColumn& src, size_t start, size_t length) { const auto& nullable_col = assert_cast(src); - _get_null_map_column().insert_range_from(*nullable_col.null_map, start, length); + get_null_map_column().insert_range_from(nullable_col.get_null_map_column(), start, length); get_nested_column().insert_range_from(*nullable_col.nested_column, start, length); - const auto& src_null_map_data = nullable_col.get_null_map_data(); - _has_null = has_null(); - _has_null |= simd::contain_byte(src_null_map_data.data() + start, length, 1); } void ColumnNullable::insert_indices_from(const IColumn& src, const uint32_t* indices_begin, @@ -296,9 +292,8 @@ void ColumnNullable::insert_indices_from(const IColumn& src, const uint32_t* ind const auto& src_concrete = assert_cast(src); get_nested_column().insert_indices_from(src_concrete.get_nested_column(), indices_begin, indices_end); - _get_null_map_column().insert_indices_from(src_concrete.get_null_map_column(), indices_begin, - indices_end); - _need_update_has_null = true; + get_null_map_column().insert_indices_from(src_concrete.get_null_map_column(), indices_begin, + indices_end); } void ColumnNullable::insert_indices_from_not_has_null(const IColumn& src, @@ -307,17 +302,18 @@ void ColumnNullable::insert_indices_from_not_has_null(const IColumn& src, const auto& src_concrete = assert_cast(src); get_nested_column().insert_indices_from(src_concrete.get_nested_column(), indices_begin, indices_end); - _get_null_map_column().insert_many_defaults(indices_end - indices_begin); + _push_false_to_nullmap(indices_end - indices_begin); } void ColumnNullable::insert(const Field& x) { if (x.is_null()) { get_nested_column().insert_default(); - _get_null_map_data().push_back(1); + get_null_map_data().push_back(1); _has_null = true; + _need_update_has_null = false; } else { get_nested_column().insert(x); - _get_null_map_data().push_back(0); + _push_false_to_nullmap(1); } } @@ -325,19 +321,18 @@ void ColumnNullable::insert_from(const IColumn& src, size_t n) { const auto& src_concrete = assert_cast(src); get_nested_column().insert_from(src_concrete.get_nested_column(), n); auto is_null = src_concrete.get_null_map_data()[n]; - _has_null |= is_null; - _get_null_map_data().push_back(is_null); + get_null_map_data().push_back(is_null); } void 
ColumnNullable::insert_from_not_nullable(const IColumn& src, size_t n) { get_nested_column().insert_from(src, n); - _get_null_map_data().push_back(0); + _push_false_to_nullmap(1); } void ColumnNullable::insert_range_from_not_nullable(const IColumn& src, size_t start, size_t length) { get_nested_column().insert_range_from(src, start, length); - _get_null_map_data().resize_fill(_get_null_map_data().size() + length, 0); + _push_false_to_nullmap(length); } void ColumnNullable::insert_many_from_not_nullable(const IColumn& src, size_t position, @@ -366,15 +361,14 @@ size_t ColumnNullable::filter(const Filter& filter) { } Status ColumnNullable::filter_by_selector(const uint16_t* sel, size_t sel_size, IColumn* col_ptr) { - const auto* nullable_col_ptr = reinterpret_cast(col_ptr); + auto* nullable_col_ptr = assert_cast(col_ptr); ColumnPtr nest_col_ptr = nullable_col_ptr->nested_column; - ColumnPtr null_map_ptr = nullable_col_ptr->null_map; + + /// `get_null_map_data` will set `_need_update_has_null` to true + auto& res_nullmap = nullable_col_ptr->get_null_map_data(); + RETURN_IF_ERROR(get_nested_column().filter_by_selector( sel, sel_size, const_cast(nest_col_ptr.get()))); - //insert cur nullmap into result nullmap which is empty - auto& res_nullmap = reinterpret_cast*>( - const_cast(null_map_ptr.get())) - ->get_data(); DCHECK(res_nullmap.empty()); res_nullmap.resize(sel_size); auto& cur_nullmap = get_null_map_column().get_data(); @@ -522,15 +516,10 @@ void ColumnNullable::get_permutation(bool reverse, size_t limit, int null_direct } } } -// -//void ColumnNullable::gather(ColumnGathererStream & gatherer) -//{ -// gatherer.gather(*this); -//} void ColumnNullable::reserve(size_t n) { get_nested_column().reserve(n); - _get_null_map_data().reserve(n); + get_null_map_data(false).reserve(n); } void ColumnNullable::resize(size_t n) { @@ -582,7 +571,7 @@ void ColumnNullable::apply_null_map(const ColumnNullable& other) { } void ColumnNullable::check_consistency() const { - if (null_map->size() != get_nested_column().size()) { + if (get_null_map_column().size() != get_nested_column().size()) { throw Exception(ErrorCode::INTERNAL_ERROR, "Sizes of nested column and null map of Nullable column are not equal"); } @@ -596,8 +585,8 @@ void ColumnNullable::sort_column(const ColumnSorter* sorter, EqualFlags& flags, } void ColumnNullable::_update_has_null() { - const UInt8* null_pos = _get_null_map_data().data(); - _has_null = simd::contain_byte(null_pos, _get_null_map_data().size(), 1); + const UInt8* null_pos = get_null_map_data().data(); + _has_null = simd::contain_byte(null_pos, get_null_map_data().size(), 1); _need_update_has_null = false; } diff --git a/be/src/vec/columns/column_nullable.h b/be/src/vec/columns/column_nullable.h index 5425242aad7142..7772b6e80ade9b 100644 --- a/be/src/vec/columns/column_nullable.h +++ b/be/src/vec/columns/column_nullable.h @@ -21,7 +21,6 @@ #pragma once #include -#include #include #include #include @@ -50,6 +49,69 @@ class ColumnSorter; using NullMap = ColumnUInt8::Container; using ConstNullMapPtr = const NullMap*; +/// use this to avoid directly access null_map forgetting modify _need_update_has_null. see more in inner comments +class NullMapProvider { +public: + NullMapProvider() = default; + NullMapProvider(MutableColumnPtr&& null_map) : _null_map(std::move(null_map)) {} + void reset_null_map(MutableColumnPtr&& null_map) { _null_map = std::move(null_map); } + + // return the column that represents the byte map. if want use null_map, just call this. 
+ const ColumnPtr& get_null_map_column_ptr() const { return _null_map; } + // for functions getting nullmap, we assume it will modify it. so set `_need_update_has_null` to true. if you know it wouldn't, + // call with arg false. but for the ops which will set _has_null themselves, call `update_has_null()` + MutableColumnPtr get_null_map_column_ptr(bool may_change = true) { + if (may_change) { + _need_update_has_null = true; + } + return _null_map->assume_mutable(); + } + ColumnUInt8::WrappedPtr& get_null_map(bool may_change = true) { + if (may_change) { + _need_update_has_null = true; + } + return _null_map; + } + + ColumnUInt8& get_null_map_column(bool may_change = true) { + if (may_change) { + _need_update_has_null = true; + } + return assert_cast(*_null_map); + } + const ColumnUInt8& get_null_map_column() const { + return assert_cast(*_null_map); + } + + NullMap& get_null_map_data(bool may_change = true) { + return get_null_map_column(may_change).get_data(); + } + const NullMap& get_null_map_data() const { return get_null_map_column().get_data(); } + + void clear_null_map() { assert_cast(_null_map.get())->clear(); } + + void update_has_null(bool new_value) { + _has_null = new_value; + _need_update_has_null = false; + } + +protected: + /** + * Here we have three variables which serve for `has_null()` judgement. If we have known the nullity of object, no need + * to check through the `null_map` to get the answer until the next time we modify it. Here `_has_null` is just the answer + * we cached. `_need_update_has_null` indicates there's modification or not since we got `_has_null()` last time. So in + * `_has_null()` we can check the two vars to know if there's need to update `has_null` or not. + * If you just want QUERY BUT NOT MODIFY, make sure the caller is const. There will be no perf overhead for const overload. + * Otherwise, this class, as the base class, will make it no possible to directly visit `null_map` forgetting to change the + * protected flags. Just call the interface is ok. + */ + bool _need_update_has_null = true; + bool _has_null = true; + +private: + IColumn::WrappedPtr _null_map; +}; + /// Class that specifies nullable columns. A nullable column represents /// a column, which may have any type, provided with the possibility of /// storing NULL values. For this purpose, a ColumnNullable object stores @@ -59,7 +121,7 @@ using ConstNullMapPtr = const NullMap*; /// over a bitmap because columns are usually stored on disk as compressed /// files. In this regard, using a bitmap instead of a byte map would /// greatly complicate the implementation with little to no benefits. 
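// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// The point of NullMapProvider is a cached answer to "does this column contain any
// NULL?". Mutating access to the null map marks the cache dirty (unless the caller
// promises it only pushed zeros), const access never does, and the linear scan is
// re-run only when the cache is dirty. A compact model of that discipline:
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

class NullCacheSketch {
public:
    // may_change = false is the fast path for callers that only append non-null rows.
    std::vector<uint8_t>& map(bool may_change = true) {
        if (may_change) {
            _dirty = true;
        }
        return _null_map;
    }
    const std::vector<uint8_t>& map() const { return _null_map; }

    bool has_null() const {
        if (_dirty) {
            _has_null = std::find(_null_map.begin(), _null_map.end(), 1) != _null_map.end();
            _dirty = false;
        }
        return _has_null;
    }

private:
    std::vector<uint8_t> _null_map;
    mutable bool _has_null = true;   // cached answer
    mutable bool _dirty = true;      // true => the cache must be recomputed
};

int main() {
    NullCacheSketch c;
    c.map(false).push_back(0);       // appending a known non-null keeps the cache valid
    c.map().push_back(1);            // generic mutation: the cache becomes dirty
    assert(c.has_null());            // recomputed lazily here
}
// -------------------------------------------------------------------------------------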
-class ColumnNullable final : public COWHelper { +class ColumnNullable final : public COWHelper, public NullMapProvider { private: friend class COWHelper; @@ -89,10 +151,11 @@ class ColumnNullable final : public COWHelper { std::string get_name() const override { return "Nullable(" + nested_column->get_name() + ")"; } MutableColumnPtr clone_resized(size_t size) const override; size_t size() const override { - return assert_cast(*null_map).size(); + return assert_cast(get_null_map_column()) + .size(); } PURE bool is_null_at(size_t n) const override { - return assert_cast(*null_map) + return assert_cast(get_null_map_column()) .get_data()[n] != 0; } Field operator[](size_t n) const override; @@ -141,8 +204,13 @@ class ColumnNullable final : public COWHelper { assert_cast(nested_column.get()) ->insert_from(src_concrete.get_nested_column(), n); auto is_null = src_concrete.get_null_map_data()[n]; - _has_null |= is_null; - _get_null_map_data().push_back(is_null); + if (is_null) { + get_null_map_data().push_back(1); + _has_null = true; + _need_update_has_null = false; + } else { + _push_false_to_nullmap(1); + } } void insert_from_not_nullable(const IColumn& src, size_t n); @@ -150,19 +218,19 @@ class ColumnNullable final : public COWHelper { void insert_many_from_not_nullable(const IColumn& src, size_t position, size_t length); void insert_many_fix_len_data(const char* pos, size_t num) override { - _get_null_map_column().insert_many_vals(0, num); + _push_false_to_nullmap(num); get_nested_column().insert_many_fix_len_data(pos, num); } void insert_many_raw_data(const char* pos, size_t num) override { DCHECK(pos); - _get_null_map_column().insert_many_vals(0, num); + _push_false_to_nullmap(num); get_nested_column().insert_many_raw_data(pos, num); } void insert_many_dict_data(const int32_t* data_array, size_t start_index, const StringRef* dict, size_t data_num, uint32_t dict_num) override { - _get_null_map_column().insert_many_vals(0, data_num); + _push_false_to_nullmap(data_num); get_nested_column().insert_many_dict_data(data_array, start_index, dict, data_num, dict_num); } @@ -172,38 +240,40 @@ class ColumnNullable final : public COWHelper { if (UNLIKELY(num == 0)) { return; } - _get_null_map_column().insert_many_vals(0, num); + _push_false_to_nullmap(num); get_nested_column().insert_many_continuous_binary_data(data, offsets, num); } void insert_many_binary_data(char* data_array, uint32_t* len_array, uint32_t* start_offset_array, size_t num) override { - _get_null_map_column().insert_many_vals(0, num); + _push_false_to_nullmap(num); get_nested_column().insert_many_binary_data(data_array, len_array, start_offset_array, num); } void insert_default() override { get_nested_column().insert_default(); - _get_null_map_data().push_back(1); + get_null_map_data().push_back(1); _has_null = true; + _need_update_has_null = false; } void insert_many_defaults(size_t length) override { get_nested_column().insert_many_defaults(length); - _get_null_map_data().resize_fill(_get_null_map_data().size() + length, 1); + get_null_map_data().resize_fill(get_null_map_data().size() + length, 1); _has_null = true; + _need_update_has_null = false; } void insert_not_null_elements(size_t num) { get_nested_column().insert_many_defaults(num); - _get_null_map_column().insert_many_vals(0, num); - _has_null = false; + _push_false_to_nullmap(num); } void insert_null_elements(int num) { get_nested_column().insert_many_defaults(num); - _get_null_map_column().insert_many_vals(1, num); + get_null_map_column().insert_many_vals(1, num); 
_has_null = true; + _need_update_has_null = false; } void pop_back(size_t n) override; @@ -255,7 +325,7 @@ class ColumnNullable final : public COWHelper { void for_each_subcolumn(ColumnCallback callback) override { callback(nested_column); - callback(null_map); + callback(get_null_map()); } bool structure_equals(const IColumn& rhs) const override { @@ -281,11 +351,13 @@ class ColumnNullable final : public COWHelper { bool is_fixed_and_contiguous() const override { return false; } bool is_exclusive() const override { - return IColumn::is_exclusive() && nested_column->is_exclusive() && null_map->is_exclusive(); + return IColumn::is_exclusive() && nested_column->is_exclusive() && + get_null_map_column().is_exclusive(); } size_t size_of_value_if_fixed() const override { - return null_map->size_of_value_if_fixed() + nested_column->size_of_value_if_fixed(); + return get_null_map_column().size_of_value_if_fixed() + + nested_column->size_of_value_if_fixed(); } bool only_null() const override { return size() == 1 && is_null_at(0); } @@ -301,32 +373,12 @@ class ColumnNullable final : public COWHelper { MutableColumnPtr get_nested_column_ptr() { return nested_column->assume_mutable(); } - /// Return the column that represents the byte map. - const ColumnPtr& get_null_map_column_ptr() const { return null_map; } - - MutableColumnPtr get_null_map_column_ptr() { - _need_update_has_null = true; - return null_map->assume_mutable(); - } - - ColumnUInt8& get_null_map_column() { - _need_update_has_null = true; - return assert_cast(*null_map); - } - const ColumnUInt8& get_null_map_column() const { - return assert_cast(*null_map); - } - void clear() override { - null_map->clear(); + clear_null_map(); nested_column->clear(); _has_null = false; } - NullMap& get_null_map_data() { return get_null_map_column().get_data(); } - - const NullMap& get_null_map_data() const { return get_null_map_column().get_data(); } - /// Apply the null byte map of a specified nullable column onto the /// null byte map of the current column by performing an element-wise OR /// between both byte maps. This method is used to determine the null byte @@ -352,7 +404,8 @@ class ColumnNullable final : public COWHelper { DCHECK(size() > self_row); const auto& nullable_rhs = assert_cast(rhs); - null_map->replace_column_data(*nullable_rhs.null_map, row, self_row); + get_null_map_column().replace_column_data(nullable_rhs.get_null_map_column(), row, + self_row); if (!nullable_rhs.is_null_at(row)) { nested_column->replace_column_data(*nullable_rhs.nested_column, row, self_row); @@ -413,21 +466,15 @@ class ColumnNullable final : public COWHelper { } private: - // the two functions will not update `_need_update_has_null` - ColumnUInt8& _get_null_map_column() { - return assert_cast(*null_map); - } - NullMap& _get_null_map_data() { return _get_null_map_column().get_data(); } - - WrappedPtr nested_column; - WrappedPtr null_map; - - bool _need_update_has_null = true; - bool _has_null = true; - void _update_has_null(); + template void apply_null_map_impl(const ColumnUInt8& map); + + // push not null value wouldn't change the nullity. 
no need to update _has_null + void _push_false_to_nullmap(size_t num) { get_null_map_column(false).insert_many_vals(0, num); } + + WrappedPtr nested_column; }; ColumnPtr make_nullable(const ColumnPtr& column, bool is_nullable = false); diff --git a/be/src/vec/common/allocator.cpp b/be/src/vec/common/allocator.cpp index 2619c0bafffb16..19969abf6cca8c 100644 --- a/be/src/vec/common/allocator.cpp +++ b/be/src/vec/common/allocator.cpp @@ -229,7 +229,6 @@ void Allocator::throw_b throw doris::Exception(doris::ErrorCode::MEM_ALLOC_FAILED, err); } -#ifndef NDEBUG template void Allocator::add_address_sanitizers( void* buf, size_t size) const { @@ -251,7 +250,6 @@ void Allocator::remove_ #endif doris::thread_context()->thread_mem_tracker()->remove_address_sanitizers(buf, size); } -#endif template void* Allocator::alloc(size_t size, diff --git a/be/src/vec/common/allocator.h b/be/src/vec/common/allocator.h index 0427d0c968df7e..b05128bc6933cc 100644 --- a/be/src/vec/common/allocator.h +++ b/be/src/vec/common/allocator.h @@ -242,10 +242,8 @@ class Allocator { void consume_memory(size_t size) const; void release_memory(size_t size) const; void throw_bad_alloc(const std::string& err) const; -#ifndef NDEBUG void add_address_sanitizers(void* buf, size_t size) const; void remove_address_sanitizers(void* buf, size_t size) const; -#endif void* alloc(size_t size, size_t alignment = 0); void* realloc(void* buf, size_t old_size, size_t new_size, size_t alignment = 0); @@ -289,9 +287,7 @@ class Allocator { if constexpr (MemoryAllocator::need_record_actual_size()) { record_size = MemoryAllocator::allocated_size(buf); } -#ifndef NDEBUG add_address_sanitizers(buf, record_size); -#endif } else { buf = nullptr; int res = MemoryAllocator::posix_memalign(&buf, alignment, size); @@ -307,9 +303,7 @@ class Allocator { if constexpr (MemoryAllocator::need_record_actual_size()) { record_size = MemoryAllocator::allocated_size(buf); } -#ifndef NDEBUG add_address_sanitizers(buf, record_size); -#endif } } if constexpr (MemoryAllocator::need_record_actual_size()) { @@ -325,9 +319,7 @@ class Allocator { throw_bad_alloc(fmt::format("Allocator: Cannot munmap {}.", size)); } } else { -#ifndef NDEBUG remove_address_sanitizers(buf, size); -#endif MemoryAllocator::free(buf); } release_memory(size); @@ -351,9 +343,7 @@ class Allocator { if (!use_mmap || (old_size < doris::config::mmap_threshold && new_size < doris::config::mmap_threshold && alignment <= MALLOC_MIN_ALIGNMENT)) { -#ifndef NDEBUG remove_address_sanitizers(buf, old_size); -#endif /// Resize malloc'd memory region with no special alignment requirement. void* new_buf = MemoryAllocator::realloc(buf, new_size); if (nullptr == new_buf) { @@ -361,11 +351,8 @@ class Allocator { throw_bad_alloc(fmt::format("Allocator: Cannot realloc from {} to {}.", old_size, new_size)); } -#ifndef NDEBUG - add_address_sanitizers( - new_buf, - new_size); // usually, buf addr = new_buf addr, asan maybe not equal. -#endif + // usually, buf addr = new_buf addr, asan maybe not equal. + add_address_sanitizers(new_buf, new_size); buf = new_buf; if constexpr (clear_memory) @@ -395,10 +382,8 @@ class Allocator { // Big allocs that requires a copy. 
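// --- Editor's sketch (illustration only, not part of the patch) ---------------------
// Dropping the #ifndef NDEBUG guards means the add/remove_address_sanitizers
// bookkeeping is now compiled and must stay balanced in every build type, not only in
// debug builds. The invariant the allocator keeps is: every buffer registered once is
// unregistered once, including across realloc where the old buffer is retired before
// the new one is registered. A toy tracker that checks that invariant (this is not the
// Doris thread-context API):
#include <cassert>
#include <cstdlib>
#include <unordered_set>

static std::unordered_set<void*> g_tracked;

static void track(void* p)   { assert(g_tracked.insert(p).second); } // register once
static void untrack(void* p) { assert(g_tracked.erase(p) == 1); }    // unregister once

static void* tracked_realloc(void* old_buf, size_t new_size) {
    untrack(old_buf);                       // retire the old identity first
    void* new_buf = std::realloc(old_buf, new_size);
    assert(new_buf != nullptr);
    track(new_buf);                         // usually the same address, but not always
    return new_buf;
}

int main() {
    void* p = std::malloc(16);
    track(p);
    p = tracked_realloc(p, 64);
    untrack(p);
    std::free(p);
    assert(g_tracked.empty());              // balanced in release builds too
}
// -------------------------------------------------------------------------------------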
void* new_buf = alloc(new_size, alignment); memcpy(new_buf, buf, std::min(old_size, new_size)); -#ifndef NDEBUG add_address_sanitizers(new_buf, new_size); remove_address_sanitizers(buf, old_size); -#endif free(buf, old_size); buf = new_buf; } diff --git a/be/src/vec/common/sort/partition_sorter.cpp b/be/src/vec/common/sort/partition_sorter.cpp index c363a41d1c772e..9e2620d64df9fd 100644 --- a/be/src/vec/common/sort/partition_sorter.cpp +++ b/be/src/vec/common/sort/partition_sorter.cpp @@ -77,7 +77,14 @@ void PartitionSorter::reset_sorter_state(RuntimeState* runtime_state) { std::priority_queue empty_queue; std::swap(_block_priority_queue, empty_queue); _state = MergeSorterState::create_unique(_row_desc, _offset, _limit, runtime_state, nullptr); - _previous_row->reset(); + // _previous_row->impl is initialized in the partition_sort_read function, + // but get_next may be called after do_partition_topn_sort() and run into the else-if branch at line 92, + // in which case _previous_row->impl == nullptr and there is nothing to reset. + if (_previous_row->impl) { + _previous_row->reset(); + } + _output_total_rows = 0; + _output_distinct_rows = 0; } Status PartitionSorter::get_next(RuntimeState* state, Block* block, bool* eos) { diff --git a/be/src/vec/core/block.cpp b/be/src/vec/core/block.cpp index d4644fca4898d8..feb6466863df94 100644 --- a/be/src/vec/core/block.cpp +++ b/be/src/vec/core/block.cpp @@ -439,13 +439,9 @@ size_t Block::allocated_bytes() const { size_t res = 0; for (const auto& elem : data) { if (!elem.column) { - std::stringstream ss; - for (const auto& e : data) { - ss << e.name + " "; - } - throw Exception(ErrorCode::INTERNAL_ERROR, - "Column {} in block is nullptr, in method bytes. All Columns are {}", - elem.name, ss.str()); + // Sometimes, if an expr fails, a nullptr + // column is left in the block.
+ continue; } res += elem.column->allocated_bytes(); } diff --git a/be/src/vec/core/types.h b/be/src/vec/core/types.h index f5943fa6e1d31f..c817c6ab273f42 100644 --- a/be/src/vec/core/types.h +++ b/be/src/vec/core/types.h @@ -403,7 +403,7 @@ std::string decimal_to_string(const T& value, UInt32 scale) { } if constexpr (std::is_same_v) { std::string num_str {wide::to_string(whole_part)}; - auto end = fmt::format_to(str.data() + pos, "{}", num_str); + auto* end = fmt::format_to(str.data() + pos, "{}", num_str); pos = end - str.data(); } else { auto end = fmt::format_to(str.data() + pos, "{}", whole_part); @@ -555,6 +555,15 @@ struct Decimal { } static Decimal from_int_frac(T integer, T fraction, int scale) { + if constexpr (std::is_same_v) { + return Decimal(integer * common::exp10_i32(scale) + fraction); + } else if constexpr (std::is_same_v) { + return Decimal(integer * common::exp10_i64(scale) + fraction); + } else if constexpr (std::is_same_v) { + return Decimal(integer * common::exp10_i128(scale) + fraction); + } else if constexpr (std::is_same_v) { + return Decimal(integer * common::exp10_i256(scale) + fraction); + } return Decimal(integer * int_exp10(scale) + fraction); } @@ -830,6 +839,7 @@ struct NativeType { using Type = wide::Int256; }; +// NOLINTBEGIN(readability-function-size) inline const char* getTypeName(TypeIndex idx) { switch (idx) { case TypeIndex::Nothing: @@ -935,6 +945,7 @@ inline const char* getTypeName(TypeIndex idx) { LOG(FATAL) << "__builtin_unreachable"; __builtin_unreachable(); } +// NOLINTEND(readability-function-size) } // namespace vectorized } // namespace doris diff --git a/be/src/vec/data_types/data_type_agg_state.h b/be/src/vec/data_types/data_type_agg_state.h index d7089503b0164c..35f86f23b2b223 100644 --- a/be/src/vec/data_types/data_type_agg_state.h +++ b/be/src/vec/data_types/data_type_agg_state.h @@ -122,9 +122,9 @@ class DataTypeAggState : public DataTypeString { DataTypePtr get_serialized_type() const { return _agg_serialized_type; } - void check_agg_state_compatibility(int read_be_exec_version) const { - BeExecVersionManager::check_agg_state_compatibility(read_be_exec_version, _be_exec_version, - get_nested_function()->get_name()); + void check_function_compatibility(int read_be_exec_version) const { + BeExecVersionManager::check_function_compatibility(read_be_exec_version, _be_exec_version, + get_nested_function()->get_name()); } private: diff --git a/be/src/vec/data_types/data_type_factory.cpp b/be/src/vec/data_types/data_type_factory.cpp index d09446b3a1cd61..2f5a4122f109c3 100644 --- a/be/src/vec/data_types/data_type_factory.cpp +++ b/be/src/vec/data_types/data_type_factory.cpp @@ -347,7 +347,8 @@ DataTypePtr DataTypeFactory::create_data_type(const TypeIndex& type_index, bool nested = std::make_shared(); break; default: - DCHECK(false) << "invalid typeindex:" << getTypeName(type_index); + throw doris::Exception(ErrorCode::INTERNAL_ERROR, "invalid typeindex: {}", + getTypeName(type_index)); break; } @@ -434,7 +435,6 @@ DataTypePtr DataTypeFactory::_create_primitive_data_type(const FieldType& type, result = vectorized::create_decimal(precision, scale, false); break; default: - DCHECK(false) << "Invalid FieldType:" << (int)type; result = nullptr; break; } @@ -596,7 +596,11 @@ DataTypePtr DataTypeFactory::create_data_type(const segment_v2::ColumnMetaPB& pc if (pcolumn.type() == static_cast(FieldType::OLAP_FIELD_TYPE_AGG_STATE)) { DataTypes data_types; for (auto child : pcolumn.children_columns()) { - 
data_types.push_back(DataTypeFactory::instance().create_data_type(child)); + auto type = DataTypeFactory::instance().create_data_type(child); + // may have length column with OLAP_FIELD_TYPE_UNSIGNED_BIGINT, then type will be nullptr + if (type) { + data_types.push_back(type); + } } nested = std::make_shared( data_types, pcolumn.result_is_nullable(), pcolumn.function_name(), @@ -631,92 +635,4 @@ DataTypePtr DataTypeFactory::create_data_type(const segment_v2::ColumnMetaPB& pc return nested; } -DataTypePtr DataTypeFactory::create_data_type(const arrow::DataType* type, bool is_nullable) { - DataTypePtr nested = nullptr; - switch (type->id()) { - case ::arrow::Type::BOOL: - nested = std::make_shared(); - break; - case ::arrow::Type::INT8: - nested = std::make_shared(); - break; - case ::arrow::Type::UINT8: - nested = std::make_shared(); - break; - case ::arrow::Type::INT16: - nested = std::make_shared(); - break; - case ::arrow::Type::UINT16: - nested = std::make_shared(); - break; - case ::arrow::Type::INT32: - nested = std::make_shared(); - break; - case ::arrow::Type::UINT32: - nested = std::make_shared(); - break; - case ::arrow::Type::INT64: - nested = std::make_shared(); - break; - case ::arrow::Type::UINT64: - nested = std::make_shared(); - break; - case ::arrow::Type::HALF_FLOAT: - case ::arrow::Type::FLOAT: - nested = std::make_shared(); - break; - case ::arrow::Type::DOUBLE: - nested = std::make_shared(); - break; - case ::arrow::Type::DATE32: - nested = std::make_shared(); - break; - case ::arrow::Type::DATE64: - case ::arrow::Type::TIMESTAMP: - nested = std::make_shared(); - break; - case ::arrow::Type::BINARY: - case ::arrow::Type::FIXED_SIZE_BINARY: - case ::arrow::Type::STRING: - nested = std::make_shared(); - break; - case ::arrow::Type::DECIMAL: - nested = std::make_shared>(); - break; - case ::arrow::Type::LIST: - DCHECK(type->num_fields() == 1); - nested = std::make_shared( - create_data_type(type->field(0)->type().get(), true)); - break; - case ::arrow::Type::MAP: - DCHECK(type->num_fields() == 2); - nested = std::make_shared( - create_data_type(type->field(0)->type().get(), true), - create_data_type(type->field(1)->type().get(), true)); - break; - case ::arrow::Type::STRUCT: { - size_t field_num = type->num_fields(); - DCHECK(type->num_fields() >= 1); - vectorized::DataTypes dataTypes; - vectorized::Strings names; - dataTypes.reserve(field_num); - names.reserve(field_num); - for (size_t i = 0; i < field_num; i++) { - dataTypes.push_back(create_data_type(type->field(i)->type().get(), true)); - names.push_back(type->field(i)->name()); - } - nested = std::make_shared(dataTypes, names); - break; - } - default: - DCHECK(false) << "invalid arrow type:" << (int)(type->id()); - break; - } - - if (nested && is_nullable) { - return make_nullable(nested); - } - return nested; -} - } // namespace doris::vectorized diff --git a/be/src/vec/data_types/data_type_factory.hpp b/be/src/vec/data_types/data_type_factory.hpp index 0439be02773e08..bf2a78d62420d1 100644 --- a/be/src/vec/data_types/data_type_factory.hpp +++ b/be/src/vec/data_types/data_type_factory.hpp @@ -68,8 +68,6 @@ class DataTypeFactory { DataTypePtr create_data_type(const PColumnMeta& pcolumn); DataTypePtr create_data_type(const segment_v2::ColumnMetaPB& pcolumn); - DataTypePtr create_data_type(const arrow::DataType* type, bool is_nullable); - DataTypePtr create_data_type(const TTypeDesc& raw_type) { return create_data_type(TypeDescriptor::from_thrift(raw_type), raw_type.is_nullable); } diff --git 
a/be/src/vec/exec/format/orc/vorc_reader.cpp b/be/src/vec/exec/format/orc/vorc_reader.cpp index cffa934cc2c740..16a3c1254c62eb 100644 --- a/be/src/vec/exec/format/orc/vorc_reader.cpp +++ b/be/src/vec/exec/format/orc/vorc_reader.cpp @@ -1140,8 +1140,9 @@ Status OrcReader::_decode_string_non_dict_encoded_column(const std::string& col_ if (cvb->hasNulls) { for (int i = 0; i < num_values; ++i) { if (cvb->notNull[i]) { - string_values.emplace_back(cvb->data[i], - trim_right(cvb->data[i], cvb->length[i])); + size_t length = trim_right(cvb->data[i], cvb->length[i]); + string_values.emplace_back((length > 0) ? cvb->data[i] : empty_string.data(), + length); } else { // Orc doesn't fill null values in new batch, but the former batch has been release. // Other types like int/long/timestamp... are flat types without pointer in them, @@ -1151,21 +1152,26 @@ Status OrcReader::_decode_string_non_dict_encoded_column(const std::string& col_ } } else { for (int i = 0; i < num_values; ++i) { - string_values.emplace_back(cvb->data[i], trim_right(cvb->data[i], cvb->length[i])); + size_t length = trim_right(cvb->data[i], cvb->length[i]); + string_values.emplace_back((length > 0) ? cvb->data[i] : empty_string.data(), + length); } } } else { if (cvb->hasNulls) { for (int i = 0; i < num_values; ++i) { if (cvb->notNull[i]) { - string_values.emplace_back(cvb->data[i], cvb->length[i]); + string_values.emplace_back( + (cvb->length[i] > 0) ? cvb->data[i] : empty_string.data(), + cvb->length[i]); } else { string_values.emplace_back(empty_string.data(), 0); } } } else { for (int i = 0; i < num_values; ++i) { - string_values.emplace_back(cvb->data[i], cvb->length[i]); + string_values.emplace_back( + (cvb->length[i] > 0) ? cvb->data[i] : empty_string.data(), cvb->length[i]); } } } @@ -1204,7 +1210,8 @@ Status OrcReader::_decode_string_dict_encoded_column(const std::string& col_name if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } else { // Orc doesn't fill null values in new batch, but the former batch has been release. // Other types like int/long/timestamp... are flat types without pointer in them, @@ -1227,7 +1234,8 @@ Status OrcReader::_decode_string_dict_encoded_column(const std::string& col_name if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } } } else { @@ -1246,7 +1254,8 @@ Status OrcReader::_decode_string_dict_encoded_column(const std::string& col_name if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } else { string_values.emplace_back(EMPTY_STRING_FOR_OVERFLOW, 0); } @@ -1265,7 +1274,8 @@ Status OrcReader::_decode_string_dict_encoded_column(const std::string& col_name if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } } } @@ -2068,7 +2078,7 @@ Status OrcReader::on_string_dicts_loaded( char* val_ptr; int64_t length; dict->getValueByIndex(i, val_ptr, length); - StringRef dict_value(val_ptr, length); + StringRef dict_value((length > 0) ? 
val_ptr : "", length); if (length > max_value_length) { max_value_length = length; } @@ -2328,7 +2338,8 @@ MutableColumnPtr OrcReader::_convert_dict_column_to_string_column( if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } else { // Orc doesn't fill null values in new batch, but the former batch has been release. // Other types like int/long/timestamp... are flat types without pointer in them, @@ -2346,7 +2357,8 @@ MutableColumnPtr OrcReader::_convert_dict_column_to_string_column( if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } } } else { @@ -2361,7 +2373,8 @@ MutableColumnPtr OrcReader::_convert_dict_column_to_string_column( if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } else { string_values.emplace_back(EMPTY_STRING_FOR_OVERFLOW, 0); } @@ -2375,7 +2388,8 @@ MutableColumnPtr OrcReader::_convert_dict_column_to_string_column( if (length > max_value_length) { max_value_length = length; } - string_values.emplace_back(val_ptr, length); + string_values.emplace_back((length > 0) ? val_ptr : EMPTY_STRING_FOR_OVERFLOW, + length); } } } diff --git a/be/src/vec/exec/format/parquet/byte_array_dict_decoder.cpp b/be/src/vec/exec/format/parquet/byte_array_dict_decoder.cpp index 7d9f708011c4e5..4be7cb8b667950 100644 --- a/be/src/vec/exec/format/parquet/byte_array_dict_decoder.cpp +++ b/be/src/vec/exec/format/parquet/byte_array_dict_decoder.cpp @@ -32,6 +32,9 @@ namespace doris::vectorized { Status ByteArrayDictDecoder::set_dict(std::unique_ptr& dict, int32_t length, size_t num_values) { _dict = std::move(dict); + if (_dict == nullptr) { + return Status::Corruption("Wrong dictionary data for byte array type, dict is null."); + } _dict_items.reserve(num_values); uint32_t offset_cursor = 0; char* dict_item_address = reinterpret_cast(_dict.get()); diff --git a/be/src/vec/exec/format/parquet/fix_length_dict_decoder.hpp b/be/src/vec/exec/format/parquet/fix_length_dict_decoder.hpp index 0bcc0bd5e73a40..6e7d3c7b99d8e0 100644 --- a/be/src/vec/exec/format/parquet/fix_length_dict_decoder.hpp +++ b/be/src/vec/exec/format/parquet/fix_length_dict_decoder.hpp @@ -107,6 +107,9 @@ class FixLengthDictDecoder final : public BaseDictDecoder { return Status::Corruption("Wrong dictionary data for fixed length type"); } _dict = std::move(dict); + if (_dict == nullptr) { + return Status::Corruption("Wrong dictionary data for byte array type, dict is null."); + } char* dict_item_address = reinterpret_cast(_dict.get()); _dict_items.resize(num_values); for (size_t i = 0; i < num_values; ++i) { diff --git a/be/src/vec/exec/scan/scanner_context.cpp b/be/src/vec/exec/scan/scanner_context.cpp index b5cb47fda1bdf5..cbb3d0f572365b 100644 --- a/be/src/vec/exec/scan/scanner_context.cpp +++ b/be/src/vec/exec/scan/scanner_context.cpp @@ -42,7 +42,8 @@ ScannerContext::ScannerContext( RuntimeState* state, pipeline::ScanLocalStateBase* local_state, const TupleDescriptor* output_tuple_desc, const RowDescriptor* output_row_descriptor, const std::list>& scanners, int64_t limit_, - std::shared_ptr dependency, bool ignore_data_distribution) + int64_t max_bytes_in_blocks_queue, std::shared_ptr 
dependency, + const int num_parallel_instances) : HasTaskExecutionCtx(state), _state(state), _local_state(local_state), @@ -52,102 +53,53 @@ ScannerContext::ScannerContext( _output_row_descriptor(output_row_descriptor), _batch_size(state->batch_size()), limit(limit_), + _max_bytes_in_queue(std::max(max_bytes_in_blocks_queue, (int64_t)1024) * + num_parallel_instances), _scanner_scheduler(state->exec_env()->scanner_scheduler()), _all_scanners(scanners.begin(), scanners.end()), - _ignore_data_distribution(ignore_data_distribution) { + _num_parallel_instances(num_parallel_instances) { DCHECK(_output_row_descriptor == nullptr || _output_row_descriptor->tuple_descriptors().size() == 1); _query_id = _state->get_query_ctx()->query_id(); ctx_id = UniqueId::gen_uid().to_string(); - _scanners.enqueue_bulk(scanners.begin(), scanners.size()); - if (limit < 0) { - limit = -1; - } - MAX_SCALE_UP_RATIO = _state->scanner_scale_up_ratio(); - _query_thread_context = {_query_id, _state->query_mem_tracker(), - _state->get_query_ctx()->workload_group()}; - _dependency = dependency; -} - -// After init function call, should not access _parent -Status ScannerContext::init() { - _scanner_profile = _local_state->_scanner_profile; - _scanner_sched_counter = _local_state->_scanner_sched_counter; - _newly_create_free_blocks_num = _local_state->_newly_create_free_blocks_num; - _scanner_wait_batch_timer = _local_state->_scanner_wait_batch_timer; - _scanner_ctx_sched_time = _local_state->_scanner_ctx_sched_time; - _scale_up_scanners_counter = _local_state->_scale_up_scanners_counter; - -#ifndef BE_TEST - // 3. get thread token - if (_state->get_query_ctx()) { - thread_token = _state->get_query_ctx()->get_token(); - _simple_scan_scheduler = _state->get_query_ctx()->get_scan_scheduler(); - if (_simple_scan_scheduler) { - _should_reset_thread_name = false; - } - _remote_scan_task_scheduler = _state->get_query_ctx()->get_remote_scan_scheduler(); - } -#endif - _local_state->_runtime_profile->add_info_string("UseSpecificThreadToken", - thread_token == nullptr ? "False" : "True"); - - int num_parallel_instances = _state->query_parallel_instance_num(); - - // NOTE: When ignore_data_distribution is true, the parallelism - // of the scan operator is regarded as 1 (actually maybe not). - // That will make the number of scan task can be submitted to the scheduler - // in a vary large value. This logicl is kept from the older implementation. - // https://github.com/apache/doris/pull/28266 - if (_ignore_data_distribution) { - num_parallel_instances = 1; - } - - // _max_bytes_in_queue controls the maximum memory that can be used by a single scan instance. - // scan_queue_mem_limit on FE is 100MB by default, on backend we will make sure its actual value - // is larger than 10MB. - _max_bytes_in_queue = std::max(_state->scan_queue_mem_limit(), (int64_t)1024 * 1024 * 10); - // Provide more memory for wide tables, increase proportionally by multiples of 300 _max_bytes_in_queue *= _output_tuple_desc->slots().size() / 300 + 1; - - // TODO: Where is the proper position to place this code? - if (_all_scanners.empty()) { + if (scanners.empty()) { _is_finished = true; _set_scanner_done(); } - + _scanners.enqueue_bulk(scanners.begin(), scanners.size()); + if (limit < 0) { + limit = -1; + } + MAX_SCALE_UP_RATIO = _state->scanner_scale_up_ratio(); // _max_thread_num controls how many scanners of this ScanOperator can be submitted to scheduler at a time. // The overall target of our system is to make full utilization of the resources. 
- // At the same time, we dont want too many tasks are queued by scheduler, that is not necessary. - // So, first of all, we try to make sure _max_thread_num of a ScanNode of a query on a single backend is less than - // 2 * config::doris_scanner_thread_pool_thread_num, so that we can make all io threads busy. + // At the same time, we don't want too many tasks queued by the scheduler, which makes the query + // wait too long and keeps existing tasks from being scheduled in time. + // First of all, we try to make sure _max_thread_num of a ScanNode of a query on a single backend is less than + // config::doris_scanner_thread_pool_thread_num. // For example, on a 64-core machine, the default value of config::doris_scanner_thread_pool_thread_num will be 64*2 =128. // and the num_parallel_instances of this scan operator will be 64/2=32. - // For a query who has one scan nodes, the _max_thread_num of each scan node instance will be 2 * 128 / 32 = 8. - // We have 32 instances of this scan operator, so for the ScanNode, we have 8 * 32 = 256 scanner tasks can be submitted at a time. - // The thread pool of scanner is 128, that means we will have 128 tasks running in parallel and another 128 tasks are waiting in the queue. - // When first 128 tasks are finished, the next 128 tasks will be extricated from the queue and be executed, - // and another 128 tasks will be submitted to the queue if there are remaining. + // For a query that has two scan nodes, the _max_thread_num of each scan node instance will be 128 / 32 = 4. + // We have 32 instances of this scan operator, so for the ScanNode, we have 4 * 32 = 128 scanner tasks that can be submitted at a time. + // Remember that we have two ScanNodes in this query, so the total number of scanner tasks that can be submitted at a time is 128 * 2 = 256. _max_thread_num = _state->num_scanner_threads() > 0 ? _state->num_scanner_threads() - : 2 * (config::doris_scanner_thread_pool_thread_num / num_parallel_instances); + : config::doris_scanner_thread_pool_thread_num / num_parallel_instances; _max_thread_num = _max_thread_num == 0 ? 1 : _max_thread_num; // In some situations, there are not too many big tablets involved, so we can reduce the thread number. - // NOTE: when _all_scanners.size is zero, the _max_thread_num will be 0. - _max_thread_num = std::min(_max_thread_num, (int32_t)_all_scanners.size()); - + _max_thread_num = std::min(_max_thread_num, (int32_t)scanners.size()); // 1. Calculate max concurrency // For select * from table limit 10; should just use one thread. if (_local_state->should_run_serial()) { _max_thread_num = 1; } - // when the user does not specify scan_thread_num, we can try to downgrade _max_thread_num. // because we found that in a table with 5k columns, the column reader may occupy too much memory. // you can refer to https://github.com/apache/doris/issues/35340 for details.
- int32_t max_column_reader_num = _state->query_options().max_column_reader_num; + int32_t max_column_reader_num = state->query_options().max_column_reader_num; if (_max_thread_num != 1 && max_column_reader_num > 0) { int32_t scan_column_num = _output_tuple_desc->slots().size(); int32_t current_column_num = scan_column_num * _max_thread_num; @@ -157,7 +109,7 @@ Status ScannerContext::init() { if (new_max_thread_num < _max_thread_num) { int32_t origin_max_thread_num = _max_thread_num; _max_thread_num = new_max_thread_num; - LOG(INFO) << "downgrade query:" << print_id(_state->query_id()) + LOG(INFO) << "downgrade query:" << print_id(state->query_id()) << " scan's max_thread_num from " << origin_max_thread_num << " to " << _max_thread_num << ",column num: " << scan_column_num << ", max_column_reader_num: " << max_column_reader_num; @@ -165,7 +117,35 @@ Status ScannerContext::init() { } } + _query_thread_context = {_query_id, _state->query_mem_tracker(), + _state->get_query_ctx()->workload_group()}; + _dependency = dependency; +} + +// After init function call, should not access _parent +Status ScannerContext::init() { + _scanner_profile = _local_state->_scanner_profile; + _scanner_sched_counter = _local_state->_scanner_sched_counter; + _newly_create_free_blocks_num = _local_state->_newly_create_free_blocks_num; + _scanner_wait_batch_timer = _local_state->_scanner_wait_batch_timer; + _scanner_ctx_sched_time = _local_state->_scanner_ctx_sched_time; + _scale_up_scanners_counter = _local_state->_scale_up_scanners_counter; + +#ifndef BE_TEST + // 3. get thread token + if (_state->get_query_ctx()) { + thread_token = _state->get_query_ctx()->get_token(); + _simple_scan_scheduler = _state->get_query_ctx()->get_scan_scheduler(); + if (_simple_scan_scheduler) { + _should_reset_thread_name = false; + } + _remote_scan_task_scheduler = _state->get_query_ctx()->get_remote_scan_scheduler(); + } +#endif + COUNTER_SET(_local_state->_max_scanner_thread_num, (int64_t)_max_thread_num); + _local_state->_runtime_profile->add_info_string("UseSpecificThreadToken", + thread_token == nullptr ? "False" : "True"); // submit `_max_thread_num` running scanners to `ScannerScheduler` // When a running scanners is finished, it will submit one of the remaining scanners. 
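[Editor's note: the comment block above packs a fair amount of arithmetic. The standalone sketch below (not part of the patch; the name compute_max_thread_num is invented here, and the wide-table downgrade step approximates logic partly hidden by the hunk boundary) re-derives the 64-core example: 128 pool threads / 32 instances = 4 tasks per scan node instance, i.e. 4 * 32 = 128 tasks per ScanNode at a time.]

#include <algorithm>
#include <cstdint>
#include <iostream>

// Rough re-derivation of the scanner thread budget described above;
// the real logic lives in ScannerContext's constructor.
int32_t compute_max_thread_num(int32_t user_scan_threads, int32_t pool_threads,
                               int32_t num_parallel_instances, int32_t num_scanners,
                               bool run_serial, int32_t max_column_reader_num,
                               int32_t scan_column_num) {
    // Per-instance share of the scanner thread pool (no longer multiplied by 2).
    int32_t max_thread_num =
            user_scan_threads > 0 ? user_scan_threads : pool_threads / num_parallel_instances;
    max_thread_num = max_thread_num == 0 ? 1 : max_thread_num;
    // Never submit more scanner tasks than there are scanners.
    max_thread_num = std::min(max_thread_num, num_scanners);
    // "SELECT * FROM t LIMIT 10"-style queries run serially on one thread.
    if (run_serial) {
        max_thread_num = 1;
    }
    // Downgrade for very wide tables so the column readers do not occupy too much memory.
    if (max_thread_num != 1 && max_column_reader_num > 0 &&
        scan_column_num * max_thread_num > max_column_reader_num) {
        max_thread_num = std::max<int32_t>(max_column_reader_num / scan_column_num, 1);
    }
    return max_thread_num;
}

int main() {
    // 64-core machine: pool = 128 threads, 32 parallel instances, no user override.
    std::cout << compute_max_thread_num(0, 128, 32, 1000, false, 0, 10) << "\n"; // prints 4
}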
diff --git a/be/src/vec/exec/scan/scanner_context.h b/be/src/vec/exec/scan/scanner_context.h index 36eb20c220def4..03c4e5a4f1bba7 100644 --- a/be/src/vec/exec/scan/scanner_context.h +++ b/be/src/vec/exec/scan/scanner_context.h @@ -105,8 +105,9 @@ class ScannerContext : public std::enable_shared_from_this, const TupleDescriptor* output_tuple_desc, const RowDescriptor* output_row_descriptor, const std::list>& scanners, - int64_t limit_, std::shared_ptr dependency, - bool ignore_data_distribution); + int64_t limit_, int64_t max_bytes_in_blocks_queue, + std::shared_ptr dependency, + const int num_parallel_instances); ~ScannerContext() override { SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_thread_context.query_mem_tracker); @@ -209,7 +210,7 @@ class ScannerContext : public std::enable_shared_from_this, int64_t limit; int32_t _max_thread_num = 0; - int64_t _max_bytes_in_queue = 0; + int64_t _max_bytes_in_queue; doris::vectorized::ScannerScheduler* _scanner_scheduler; SimplifiedScanScheduler* _simple_scan_scheduler = nullptr; SimplifiedScanScheduler* _remote_scan_task_scheduler = nullptr; @@ -219,6 +220,7 @@ class ScannerContext : public std::enable_shared_from_this, int32_t _num_running_scanners = 0; // weak pointer for _scanners, used in stop function std::vector> _all_scanners; + const int _num_parallel_instances; std::shared_ptr _scanner_profile; RuntimeProfile::Counter* _scanner_sched_counter = nullptr; RuntimeProfile::Counter* _newly_create_free_blocks_num = nullptr; @@ -227,7 +229,6 @@ class ScannerContext : public std::enable_shared_from_this, RuntimeProfile::Counter* _scale_up_scanners_counter = nullptr; QueryThreadContext _query_thread_context; std::shared_ptr _dependency = nullptr; - bool _ignore_data_distribution = false; // for scaling up the running scanners size_t _estimated_block_size = 0; diff --git a/be/src/vec/exec/scan/scanner_scheduler.cpp b/be/src/vec/exec/scan/scanner_scheduler.cpp index 444ff4dbb0cd9f..fdd677f0687d30 100644 --- a/be/src/vec/exec/scan/scanner_scheduler.cpp +++ b/be/src/vec/exec/scan/scanner_scheduler.cpp @@ -278,14 +278,14 @@ void ScannerScheduler::_scanner_scan(std::shared_ptr ctx, ctx->update_peak_memory_usage(free_block->allocated_bytes()); ctx->update_peak_memory_usage(-free_block->allocated_bytes()); status = scanner->get_block_after_projects(state, free_block.get(), &eos); - // Projection will truncate useless columns, makes block size change. - auto free_block_bytes = free_block->allocated_bytes(); - ctx->update_peak_memory_usage(free_block_bytes); first_read = false; if (!status.ok()) { LOG(WARNING) << "Scan thread read VScanner failed: " << status.to_string(); break; } + // Projection will truncate useless columns, makes block size change. 
+ auto free_block_bytes = free_block->allocated_bytes(); + ctx->update_peak_memory_usage(free_block_bytes); raw_bytes_read += free_block_bytes; if (!scan_task->cached_blocks.empty() && scan_task->cached_blocks.back().first->rows() + free_block->rows() <= diff --git a/be/src/vec/exprs/vmatch_predicate.cpp b/be/src/vec/exprs/vmatch_predicate.cpp index 9dea7ca870c197..e7822276c6d3b6 100644 --- a/be/src/vec/exprs/vmatch_predicate.cpp +++ b/be/src/vec/exprs/vmatch_predicate.cpp @@ -22,28 +22,24 @@ #pragma clang diagnostic ignored "-Wshadow-field" #endif -#include #include #include // IWYU pragma: keep #include #include #include -#include #include #include #include #include -#include "CLucene/analysis/standard95/StandardAnalyzer.h" #include "common/status.h" +#include "olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h" #include "olap/rowset/segment_v2/inverted_index_reader.h" #include "vec/core/block.h" #include "vec/core/column_numbers.h" #include "vec/core/column_with_type_and_name.h" -#include "vec/core/columns_with_type_and_name.h" #include "vec/exprs/vexpr_context.h" -#include "vec/exprs/vliteral.h" #include "vec/exprs/vslot_ref.h" #include "vec/functions/simple_function_factory.h" @@ -61,13 +57,13 @@ VMatchPredicate::VMatchPredicate(const TExprNode& node) : VExpr(node) { get_inverted_index_parser_type_from_string(node.match_predicate.parser_type); _inverted_index_ctx->parser_mode = node.match_predicate.parser_mode; _inverted_index_ctx->char_filter_map = node.match_predicate.char_filter_map; - _analyzer = InvertedIndexReader::create_analyzer(_inverted_index_ctx.get()); - _analyzer->set_lowercase(node.match_predicate.parser_lowercase); - if (node.match_predicate.parser_stopwords == "none") { - _analyzer->set_stopwords(nullptr); + if (node.match_predicate.parser_lowercase) { + _inverted_index_ctx->lower_case = INVERTED_INDEX_PARSER_TRUE; } else { - _analyzer->set_stopwords(&lucene::analysis::standard95::stop_words); + _inverted_index_ctx->lower_case = INVERTED_INDEX_PARSER_FALSE; } + _inverted_index_ctx->stop_words = node.match_predicate.parser_stopwords; + _analyzer = inverted_index::InvertedIndexAnalyzer::create_analyzer(_inverted_index_ctx.get()); _inverted_index_ctx->analyzer = _analyzer.get(); } diff --git a/be/src/vec/functions/array/function_array_index.h b/be/src/vec/functions/array/function_array_index.h index e4426e95ce39da..2339fe4c5097d9 100644 --- a/be/src/vec/functions/array/function_array_index.h +++ b/be/src/vec/functions/array/function_array_index.h @@ -188,11 +188,6 @@ class FunctionArrayIndex : public IFunction { roaring->cardinality(), result_bitmap); } }) - if (iter->has_null()) { - segment_v2::InvertedIndexQueryCacheHandle null_bitmap_cache_handle; - RETURN_IF_ERROR(iter->read_null_bitmap(&null_bitmap_cache_handle)); - null_bitmap = null_bitmap_cache_handle.get_bitmap(); - } segment_v2::InvertedIndexResultBitmap result(roaring, null_bitmap); bitmap_result = result; bitmap_result.mask_out_null(); diff --git a/be/src/vec/functions/date_format_type.h b/be/src/vec/functions/date_format_type.h new file mode 100644 index 00000000000000..071ecf44853e1d --- /dev/null +++ b/be/src/vec/functions/date_format_type.h @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "vec/common/string_ref.h" + +namespace doris::vectorized::time_format_type { +// Used to optimize commonly used date formats. + +inline StringRef rewrite_specific_format(const char* raw_str, size_t str_size) { + const static std::string specific_format_strs[3] = {"%Y%m%d", "%Y-%m-%d", "%Y-%m-%d %H:%i:%s"}; + const static std::string specific_format_rewrite[3] = {"yyyyMMdd", "yyyy-MM-dd", + "yyyy-MM-dd HH:mm:ss"}; + for (int i = 0; i < 3; i++) { + const StringRef specific_format {specific_format_strs[i].data(), + specific_format_strs[i].size()}; + if (specific_format == StringRef {raw_str, str_size}) { + return {specific_format_rewrite[i].data(), specific_format_rewrite[i].size()}; + } + } + return {raw_str, str_size}; +} + +template +void put_year(T y, char* buf, int& i) { + int t = y / 100; + buf[i++] = t / 10 + '0'; + buf[i++] = t % 10 + '0'; + + t = y % 100; + buf[i++] = t / 10 + '0'; + buf[i++] = t % 10 + '0'; +} + +template +void put_other(T m, char* buf, int& i) { + buf[i++] = m / 10 + '0'; + buf[i++] = m % 10 + '0'; +} + +// NoneImpl indicates that no specific optimization has been applied, and the general logic is used for processing. 
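[Editor's note: a quick, self-contained illustration of how the two-digit/four-digit helpers above are composed by the fixed-width Impl structs defined below. The helpers are re-declared locally, specialized to int, so the snippet compiles on its own; it is not part of the patch.]

#include <iostream>
#include <string>

// Local copies of put_year / put_other for the demo.
static void put_year(int y, char* buf, int& i) {
    int t = y / 100;
    buf[i++] = static_cast<char>('0' + t / 10);
    buf[i++] = static_cast<char>('0' + t % 10);
    t = y % 100;
    buf[i++] = static_cast<char>('0' + t / 10);
    buf[i++] = static_cast<char>('0' + t % 10);
}

static void put_other(int m, char* buf, int& i) {
    buf[i++] = static_cast<char>('0' + m / 10);
    buf[i++] = static_cast<char>('0' + m % 10);
}

int main() {
    // Mirrors what yyyy_MM_ddImpl::date_to_str does for 2024-06-01: the output is
    // fixed width, so callers can reserve the buffer up front and skip strlen.
    char buf[16];
    int i = 0;
    put_year(2024, buf, i);
    buf[i++] = '-';
    put_other(6, buf, i);
    buf[i++] = '-';
    put_other(1, buf, i);
    std::cout << std::string(buf, i) << "\n"; // prints 2024-06-01
}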
+struct NoneImpl {}; + +struct yyyyMMddImpl { + template + size_t static date_to_str(const DateType& date_value, char* buf) { + int i = 0; + put_year(date_value.year(), buf, i); + put_other(date_value.month(), buf, i); + put_other(date_value.day(), buf, i); + return i; + } +}; + +struct yyyy_MM_ddImpl { + template + size_t static date_to_str(const DateType& date_value, char* buf) { + int i = 0; + put_year(date_value.year(), buf, i); + buf[i++] = '-'; + put_other(date_value.month(), buf, i); + buf[i++] = '-'; + put_other(date_value.day(), buf, i); + return i; + } +}; + +struct yyyy_MM_dd_HH_mm_ssImpl { + template + size_t static date_to_str(const DateType& date_value, char* buf) { + int i = 0; + put_year(date_value.year(), buf, i); + buf[i++] = '-'; + put_other(date_value.month(), buf, i); + buf[i++] = '-'; + put_other(date_value.day(), buf, i); + buf[i++] = ' '; + put_other(date_value.hour(), buf, i); + buf[i++] = ':'; + put_other(date_value.minute(), buf, i); + buf[i++] = ':'; + put_other(date_value.second(), buf, i); + return i; + } +}; + +struct yyyy_MMImpl { + template + size_t static date_to_str(const DateType& date_value, char* buf) { + int i = 0; + put_year(date_value.year(), buf, i); + buf[i++] = '-'; + put_other(date_value.month(), buf, i); + return i; + } +}; +struct yyyyMMImpl { + template + size_t static date_to_str(const DateType& date_value, char* buf) { + int i = 0; + put_year(date_value.year(), buf, i); + put_other(date_value.month(), buf, i); + return i; + } +}; + +struct yyyyImpl { + template + size_t static date_to_str(const DateType& date_value, char* buf) { + int i = 0; + put_year(date_value.year(), buf, i); + return i; + } +}; + +using FormatImplVariant = std::variant; + +const static std::string default_format = "yyyy-MM-dd HH:mm:ss"; +const static auto default_impl = yyyy_MM_dd_HH_mm_ssImpl {}; +inline FormatImplVariant string_to_impl(const std::string& format) { + if (format == "yyyyMMdd" || format == "%Y%m%d") { + return yyyyMMddImpl {}; + } else if (format == "yyyy-MM-dd" || format == "%Y-%m-%d") { + return yyyy_MM_ddImpl {}; + } else if (format == "yyyy-MM-dd HH:mm:ss" || format == "%Y-%m-%d %H:%i:%s") { + return yyyy_MM_dd_HH_mm_ssImpl {}; + } else if (format == "yyyy-MM") { + return yyyy_MMImpl {}; + } else if (format == "yyyyMM") { + return yyyyMMImpl {}; + } else if (format == "yyyy") { + return yyyyImpl {}; + } else { + return NoneImpl {}; + } +} + +} // namespace doris::vectorized::time_format_type diff --git a/be/src/vec/functions/date_time_transforms.h b/be/src/vec/functions/date_time_transforms.h index 266c9b5d272d38..73155afae3a996 100644 --- a/be/src/vec/functions/date_time_transforms.h +++ b/be/src/vec/functions/date_time_transforms.h @@ -33,6 +33,7 @@ #include "vec/core/types.h" #include "vec/data_types/data_type_date_time.h" #include "vec/data_types/data_type_string.h" +#include "vec/functions/date_format_type.h" #include "vec/runtime/vdatetime_value.h" #include "vec/utils/util.hpp" @@ -156,6 +157,29 @@ struct DayNameImpl { } }; +template +struct ToIso8601Impl { + using OpArgType = ArgType; + static constexpr auto name = "to_iso8601"; + static constexpr auto max_size = std::is_same_v ? 10 : 26; + + static inline auto execute(const typename DateTraits::T& dt, + ColumnString::Chars& res_data, size_t& offset) { + auto length = dt.to_buffer((char*)res_data.data() + offset, + std::is_same_v ? 
-1 : 6); + if (std::is_same_v) { + res_data[offset + 10] = 'T'; + } + + offset += length; + return offset; + } + + static DataTypes get_variadic_argument_types() { + return {std::make_shared::DateType>()}; + } +}; + template struct MonthNameImpl { using OpArgType = ArgType; @@ -184,34 +208,44 @@ struct DateFormatImpl { static constexpr auto name = "date_format"; - static inline auto execute(const FromType& t, StringRef format, ColumnString::Chars& res_data, - size_t& offset) { - const auto& dt = (DateType&)t; - if (format.size > 128) { - return std::pair {offset, true}; - } - char buf[100 + SAFE_FORMAT_STRING_MARGIN]; - if (!dt.to_format_string_conservative(format.data, format.size, buf, - 100 + SAFE_FORMAT_STRING_MARGIN)) { - return std::pair {offset, true}; - } + template + static inline bool execute(const FromType& t, StringRef format, ColumnString::Chars& res_data, + size_t& offset, const cctz::time_zone& time_zone) { + if constexpr (std::is_same_v) { + // Handle non-special formats. + const auto& dt = (DateType&)t; + char buf[100 + SAFE_FORMAT_STRING_MARGIN]; + if (!dt.to_format_string_conservative(format.data, format.size, buf, + 100 + SAFE_FORMAT_STRING_MARGIN)) { + return true; + } - auto len = strlen(buf); - res_data.insert(buf, buf + len); - offset += len; - return std::pair {offset, false}; + auto len = strlen(buf); + res_data.insert(buf, buf + len); + offset += len; + return false; + } else { + const auto& dt = (DateType&)t; + + if (!dt.is_valid_date()) { + return true; + } + + // No buffer is needed here because these specially optimized formats have fixed lengths, + // and sufficient memory has already been reserved. + auto len = Impl::date_to_str(dt, (char*)res_data.data() + offset); + offset += len; + + return false; + } } static DataTypes get_variadic_argument_types() { - return std::vector { - std::dynamic_pointer_cast( - std::make_shared::DateType>()), - std::dynamic_pointer_cast( - std::make_shared())}; + return std::vector {std::make_shared::DateType>(), + std::make_shared()}; } }; -// TODO: This function should be depend on arguments not always nullable template struct FromUnixTimeImpl { using FromType = Int64; @@ -220,24 +254,45 @@ struct FromUnixTimeImpl { static const int64_t TIMESTAMP_VALID_MAX = 32536771199; static constexpr auto name = "from_unixtime"; - static inline auto execute(FromType val, StringRef format, ColumnString::Chars& res_data, + template + static inline bool execute(const FromType& val, StringRef format, ColumnString::Chars& res_data, size_t& offset, const cctz::time_zone& time_zone) { - DateType dt; - if (format.size > 128 || val < 0 || val > TIMESTAMP_VALID_MAX) { - return std::pair {offset, true}; - } - dt.from_unixtime(val, time_zone); + if constexpr (std::is_same_v) { + DateType dt; + if (val < 0 || val > TIMESTAMP_VALID_MAX) { + return true; + } + dt.from_unixtime(val, time_zone); - char buf[100 + SAFE_FORMAT_STRING_MARGIN]; - if (!dt.to_format_string_conservative(format.data, format.size, buf, - 100 + SAFE_FORMAT_STRING_MARGIN)) { - return std::pair {offset, true}; - } + char buf[100 + SAFE_FORMAT_STRING_MARGIN]; + if (!dt.to_format_string_conservative(format.data, format.size, buf, + 100 + SAFE_FORMAT_STRING_MARGIN)) { + return true; + } - auto len = strlen(buf); - res_data.insert(buf, buf + len); - offset += len; - return std::pair {offset, false}; + auto len = strlen(buf); + res_data.insert(buf, buf + len); + offset += len; + return false; + + } else { + DateType dt; + if (val < 0 || val > TIMESTAMP_VALID_MAX) { + return true; + } + 
dt.from_unixtime(val, time_zone); + + if (!dt.is_valid_date()) { + return true; + } + + // No buffer is needed here because these specially optimized formats have fixed lengths, + // and sufficient memory has already been reserved. + auto len = Impl::date_to_str(dt, (char*)res_data.data() + offset); + offset += len; + + return false; + } } }; diff --git a/be/src/vec/functions/function_conv.cpp b/be/src/vec/functions/function_conv.cpp index 78abad9f342ecf..9db79b89993a91 100644 --- a/be/src/vec/functions/function_conv.cpp +++ b/be/src/vec/functions/function_conv.cpp @@ -205,13 +205,20 @@ struct ConvStringImpl { ColumnString* result_column, NullMap& result_null_map, size_t index) { StringRef str = data_column->get_data_at(index); + auto new_size = str.size; + // eg: select conv('1.464868',10,2); the result should be return 1. + // But StringParser::string_to_int will PARSE_FAILURE and return 0, + // so should handle the point part of number firstly if need convert '1.464868' to number 1 + if (auto pos = str.to_string_view().find_first_of('.'); pos != std::string::npos) { + new_size = pos; + } StringParser::ParseResult parse_res; // select conv('ffffffffffffff', 24, 2); // if 'ffffffffffffff' parse as int64_t will be overflow, will be get max value: std::numeric_limits::max() // so change it parse as uint64_t, and return value could still use int64_t, in function decimal_to_base could handle it. // But if the value is still overflow in uint64_t, will get max value of uint64_t int64_t decimal_num = - StringParser::string_to_int(str.data, str.size, src_base, &parse_res); + StringParser::string_to_int(str.data, new_size, src_base, &parse_res); if (src_base < 0 && decimal_num >= 0) { result_null_map[index] = true; result_column->insert_default(); diff --git a/be/src/vec/functions/function_date_or_datetime_to_string.cpp b/be/src/vec/functions/function_date_or_datetime_to_string.cpp index b8f1c3206632c5..8c3fd755a0f4f4 100644 --- a/be/src/vec/functions/function_date_or_datetime_to_string.cpp +++ b/be/src/vec/functions/function_date_or_datetime_to_string.cpp @@ -33,6 +33,9 @@ using FunctionMonthNameV2 = FunctionDateOrDateTimeToString using FunctionDateTimeV2DayName = FunctionDateOrDateTimeToString>; using FunctionDateTimeV2MonthName = FunctionDateOrDateTimeToString>; +using FunctionDateIso8601 = FunctionDateOrDateTimeToString>; +using FunctionDateTimeIso8601 = FunctionDateOrDateTimeToString>; + void register_function_date_time_to_string(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function(); @@ -40,6 +43,8 @@ void register_function_date_time_to_string(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function(); factory.register_function(); + factory.register_function(); + factory.register_function(); } } // namespace doris::vectorized diff --git a/be/src/vec/functions/function_datetime_string_to_string.h b/be/src/vec/functions/function_datetime_string_to_string.h index 41eba51301c015..80fe6cf1f4174b 100644 --- a/be/src/vec/functions/function_datetime_string_to_string.h +++ b/be/src/vec/functions/function_datetime_string_to_string.h @@ -21,6 +21,7 @@ #include #include +#include #include "common/status.h" #include "vec/aggregate_functions/aggregate_function.h" @@ -29,6 +30,7 @@ #include "vec/columns/column_string.h" #include "vec/columns/column_vector.h" #include "vec/columns/columns_number.h" +#include "vec/common/assert_cast.h" #include "vec/common/string_ref.h" #include "vec/core/block.h" #include "vec/core/column_numbers.h" @@ 
-38,6 +40,7 @@ #include "vec/data_types/data_type.h" #include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_string.h" +#include "vec/functions/date_format_type.h" #include "vec/functions/date_time_transforms.h" #include "vec/functions/function.h" #include "vec/runtime/vdatetime_value.h" @@ -66,6 +69,57 @@ class FunctionDateTimeStringToString : public IFunction { return {}; } + struct FormatState { + std::string format_str; + // Check if the format string is null or exceeds the length limit. + bool is_valid = true; + time_format_type::FormatImplVariant format_type; + }; + + Status open(FunctionContext* context, FunctionContext::FunctionStateScope scope) override { + if (scope == FunctionContext::THREAD_LOCAL) { + return Status::OK(); + } + std::shared_ptr state = std::make_shared(); + DCHECK((context->get_num_args() == 1) || (context->get_num_args() == 2)); + context->set_function_state(scope, state); + if (context->get_num_args() == 1) { + // default argument + state->format_str = time_format_type::default_format; + state->format_type = time_format_type::default_impl; + return IFunction::open(context, scope); + } + + const auto* column_string = context->get_constant_col(1); + + if (column_string == nullptr) { + return Status::InvalidArgument( + "The second parameter of the function {} must be a constant.", get_name()); + } + + auto string_vale = column_string->column_ptr->get_data_at(0); + if (string_vale.data == nullptr) { + // func(col , null); + state->is_valid = false; + return IFunction::open(context, scope); + } + + string_vale = string_vale.trim(); + auto format_str = + time_format_type::rewrite_specific_format(string_vale.data, string_vale.size); + if (format_str.size > 128) { + // exceeds the length limit. + state->is_valid = false; + return IFunction::open(context, scope); + } + + // Preprocess special format strings. + state->format_str = format_str; + state->format_type = time_format_type::string_to_impl(state->format_str); + + return IFunction::open(context, scope); + } + DataTypePtr get_return_type_impl(const ColumnsWithTypeAndName& arguments) const override { return make_nullable(std::make_shared()); } @@ -78,42 +132,68 @@ class FunctionDateTimeStringToString : public IFunction { const ColumnPtr source_col = block.get_by_position(arguments[0]).column; const auto* nullable_column = check_and_get_column(source_col.get()); - const auto* sources = check_and_get_column>( + const auto* sources = assert_cast*>( nullable_column ? nullable_column->get_nested_column_ptr().get() : source_col.get()); - if (sources) { - auto col_res = ColumnString::create(); - ColumnUInt8::MutablePtr col_null_map_to; - col_null_map_to = ColumnUInt8::create(); - auto& vec_null_map_to = col_null_map_to->get_data(); - - if (arguments.size() == 2) { - const IColumn& source_col1 = *block.get_by_position(arguments[1]).column; - StringRef formatter = - source_col1.get_data_at(0); // for both ColumnString or ColumnConst. 
- TransformerToStringTwoArgument::vector_constant( - context, sources->get_data(), formatter, col_res->get_chars(), - col_res->get_offsets(), vec_null_map_to); - } else { //default argument - TransformerToStringTwoArgument::vector_constant( - context, sources->get_data(), StringRef("%Y-%m-%d %H:%i:%s"), - col_res->get_chars(), col_res->get_offsets(), vec_null_map_to); - } + auto col_res = ColumnString::create(); + ColumnUInt8::MutablePtr col_null_map_to; + col_null_map_to = ColumnUInt8::create(); + auto& vec_null_map_to = col_null_map_to->get_data(); - if (nullable_column) { - const auto& origin_null_map = nullable_column->get_null_map_column().get_data(); - for (int i = 0; i < origin_null_map.size(); ++i) { - vec_null_map_to[i] |= origin_null_map[i]; - } + RETURN_IF_ERROR(vector_constant(context, sources->get_data(), col_res->get_chars(), + col_res->get_offsets(), vec_null_map_to)); + + if (nullable_column) { + // input column is nullable + const auto& origin_null_map = nullable_column->get_null_map_column().get_data(); + for (int i = 0; i < origin_null_map.size(); ++i) { + vec_null_map_to[i] |= origin_null_map[i]; } - block.get_by_position(result).column = - ColumnNullable::create(std::move(col_res), std::move(col_null_map_to)); - } else { - return Status::InternalError("Illegal column {} of first argument of function {}", - block.get_by_position(arguments[0]).column->get_name(), - name); } + + block.get_by_position(result).column = + ColumnNullable::create(std::move(col_res), std::move(col_null_map_to)); + + return Status::OK(); + } + + Status vector_constant(FunctionContext* context, + const PaddedPODArray& ts, + ColumnString::Chars& res_data, ColumnString::Offsets& res_offsets, + PaddedPODArray& null_map) const { + auto* format_state = reinterpret_cast( + context->get_function_state(FunctionContext::FRAGMENT_LOCAL)); + if (!format_state) { + return Status::RuntimeError("funciton context for function '{}' must have FormatState;", + get_name()); + } + + StringRef format(format_state->format_str); + + const auto len = ts.size(); + + if (!format_state->is_valid) { + res_offsets.resize_fill(len, 0); + null_map.resize_fill(len, true); + return Status::OK(); + } + res_offsets.resize(len); + res_data.reserve(len * format.size + len); + null_map.resize_fill(len, false); + + std::visit( + [&](auto type) { + using Impl = decltype(type); + size_t offset = 0; + for (int i = 0; i < len; ++i) { + null_map[i] = Transform::template execute( + ts[i], format, res_data, offset, context->state()->timezone_obj()); + res_offsets[i] = offset; + } + res_data.resize(offset); + }, + format_state->format_type); return Status::OK(); } }; diff --git a/be/src/vec/functions/function_encryption.cpp b/be/src/vec/functions/function_encryption.cpp index c90b6a1ff60717..d017f761897133 100644 --- a/be/src/vec/functions/function_encryption.cpp +++ b/be/src/vec/functions/function_encryption.cpp @@ -244,8 +244,9 @@ struct EncryptionAndDecryptTwoImpl { if (mode_arg.size != 0) { if (!aes_mode_map.contains(mode_str)) { all_insert_null = true; + } else { + encryption_mode = aes_mode_map.at(mode_str); } - encryption_mode = aes_mode_map.at(mode_str); } const ColumnString::Offsets* offsets_column = &column->get_offsets(); const ColumnString::Chars* chars_column = &column->get_chars(); @@ -371,13 +372,15 @@ struct EncryptionAndDecryptMultiImpl { if constexpr (is_sm_mode) { if (sm4_mode_map.count(mode_str) == 0) { all_insert_null = true; + } else { + encryption_mode = sm4_mode_map.at(mode_str); } - encryption_mode = 
sm4_mode_map.at(mode_str); } else { if (aes_mode_map.count(mode_str) == 0) { all_insert_null = true; + } else { + encryption_mode = aes_mode_map.at(mode_str); } - encryption_mode = aes_mode_map.at(mode_str); } } diff --git a/be/src/vec/functions/function_jsonb.cpp b/be/src/vec/functions/function_jsonb.cpp index 53ccec756fd109..45864a07bdd08a 100644 --- a/be/src/vec/functions/function_jsonb.cpp +++ b/be/src/vec/functions/function_jsonb.cpp @@ -61,7 +61,9 @@ #include "vec/data_types/data_type_string.h" #include "vec/functions/function.h" #include "vec/functions/function_string.h" +#include "vec/functions/like.h" #include "vec/functions/simple_function_factory.h" +#include "vec/json/simd_json_parser.h" #include "vec/utils/util.hpp" namespace doris::vectorized { @@ -1598,6 +1600,356 @@ struct JsonbContainsAndPathImpl { } }; +class FunctionJsonSearch : public IFunction { +private: + using OneFun = std::function; + static Status always_one(size_t i, bool* res) { + *res = true; + return Status::OK(); + } + static Status always_all(size_t i, bool* res) { + *res = false; + return Status::OK(); + } + + using CheckNullFun = std::function; + static bool always_not_null(size_t) { return false; } + static bool always_null(size_t) { return true; } + + using GetJsonStringRefFun = std::function; + + Status matched(const std::string_view& str, LikeState* state, unsigned char* res) const { + StringRef pattern; // not used + StringRef value_val(str.data(), str.size()); + return (state->scalar_function)(&state->search_state, value_val, pattern, res); + } + + /** + * Recursive search for matching string, if found, the result will be added to a vector + * @param element json element + * @param one_match + * @param search_str + * @param cur_path + * @param matches The path that has already been matched + * @return true if matched else false + */ + bool find_matches(const SimdJSONParser::Element& element, const bool& one_match, + LikeState* state, JsonbPath* cur_path, + std::unordered_set* matches) const { + if (element.isString()) { + const std::string_view str = element.getString(); + unsigned char res; + RETURN_IF_ERROR(matched(str, state, &res)); + if (res) { + std::string str; + auto valid = cur_path->to_string(&str); + if (!valid) { + return false; + } + auto res = matches->insert(str); + return res.second; + } else { + return false; + } + } else if (element.isObject()) { + const SimdJSONParser::Object& object = element.getObject(); + bool find = false; + for (size_t i = 0; i < object.size(); ++i) { + const SimdJSONParser::KeyValuePair& item = object[i]; + const std::string_view& key = item.first; + const SimdJSONParser::Element& child_element = item.second; + // construct an object member path leg. + auto leg = std::make_unique(const_cast(key.data()), key.size(), 0, + MEMBER_CODE); + cur_path->add_leg_to_leg_vector(std::move(leg)); + find |= find_matches(child_element, one_match, state, cur_path, matches); + cur_path->pop_leg_from_leg_vector(); + if (one_match && find) { + return true; + } + } + return find; + } else if (element.isArray()) { + const SimdJSONParser::Array& array = element.getArray(); + bool find = false; + for (size_t i = 0; i < array.size(); ++i) { + auto leg = std::make_unique(nullptr, 0, i, ARRAY_CODE); + cur_path->add_leg_to_leg_vector(std::move(leg)); + const SimdJSONParser::Element& child_element = array[i]; + // construct an array cell path leg. 
+ find |= find_matches(child_element, one_match, state, cur_path, matches); + cur_path->pop_leg_from_leg_vector(); + if (one_match && find) { + return true; + } + } + return find; + } else { + return false; + } + } + + void make_result_str(std::unordered_set& matches, ColumnString* result_col) const { + JsonbWriter writer; + if (matches.size() == 1) { + for (const auto& str_ref : matches) { + writer.writeStartString(); + writer.writeString(str_ref); + writer.writeEndString(); + } + } else { + writer.writeStartArray(); + for (const auto& str_ref : matches) { + writer.writeStartString(); + writer.writeString(str_ref); + writer.writeEndString(); + } + writer.writeEndArray(); + } + + result_col->insert_data(writer.getOutput()->getBuffer(), + (size_t)writer.getOutput()->getSize()); + } + + template + Status execute_vector(Block& block, size_t input_rows_count, CheckNullFun json_null_check, + GetJsonStringRefFun col_json_string, CheckNullFun one_null_check, + OneFun one_check, CheckNullFun search_null_check, + const ColumnString* col_search_string, FunctionContext* context, + size_t result) const { + auto result_col = ColumnString::create(); + auto null_map = ColumnUInt8::create(input_rows_count, 0); + + std::shared_ptr state_ptr; + LikeState* state = nullptr; + if (search_is_const) { + state = reinterpret_cast( + context->get_function_state(FunctionContext::THREAD_LOCAL)); + } + + SimdJSONParser parser; + SimdJSONParser::Element root_element; + bool is_one = false; + + for (size_t i = 0; i < input_rows_count; ++i) { + // an error occurs if the json_doc argument is not a valid json document. + if (json_null_check(i)) { + null_map->get_data()[i] = 1; + result_col->insert_data("", 0); + continue; + } + const auto& json_doc = col_json_string(i); + if (!parser.parse(json_doc.data, json_doc.size, root_element)) { + return Status::InvalidArgument( + "the json_doc argument {} is not a valid json document", json_doc); + } + + if (!one_null_check(i)) { + RETURN_IF_ERROR(one_check(i, &is_one)); + } + + if (one_null_check(i) || search_null_check(i)) { + null_map->get_data()[i] = 1; + result_col->insert_data("", 0); + continue; + } + + // an error occurs if any path argument is not a valid path expression. + std::string root_path_str = "$"; + JsonbPath root_path; + root_path.seek(root_path_str.c_str(), root_path_str.size()); + std::vector paths; + paths.push_back(&root_path); + + if (!search_is_const) { + state_ptr = std::make_shared(); + state_ptr->is_like_pattern = true; + const auto& search_str = col_search_string->get_data_at(i); + RETURN_IF_ERROR(FunctionLike::construct_like_const_state(context, search_str, + state_ptr, false)); + state = state_ptr.get(); + } + + // maintain a hashset to deduplicate matches. + std::unordered_set matches; + for (const auto& item : paths) { + auto cur_path = item; + auto find = find_matches(root_element, is_one, state, cur_path, &matches); + if (is_one && find) { + break; + } + } + if (matches.empty()) { + // returns NULL if the search_str is not found in the document. 
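[Editor's note: the object/array branches of find_matches above are a plain depth-first search with path push/pop and an early exit in 'one' mode. Below is a rough standalone sketch of that shape, not part of the patch: it uses a toy Node type and plain string equality instead of the LIKE state, and omits array legs and JsonbPath quoting.]

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy JSON value: a string leaf, or an object given as (key, child) pairs.
struct Node {
    std::string value;
    std::vector<std::pair<std::string, Node>> children;
};

// Depth-first search mirroring find_matches: append a path leg, recurse,
// pop the leg, and stop at the first hit when one_match is true.
static bool search(const Node& node, const std::string& needle, bool one_match,
                   std::string& cur_path, std::vector<std::string>& matches) {
    if (node.children.empty()) {
        if (node.value == needle) { // the real code evaluates a LIKE pattern here
            matches.push_back(cur_path);
            return true;
        }
        return false;
    }
    bool found = false;
    for (const auto& [key, child] : node.children) {
        const size_t old_len = cur_path.size();
        cur_path += "." + key; // member path leg
        found |= search(child, needle, one_match, cur_path, matches);
        cur_path.resize(old_len); // pop the leg
        if (one_match && found) {
            return true;
        }
    }
    return found;
}

int main() {
    Node doc;
    doc.children.push_back({"a", Node{"abc", {}}});
    Node b;
    b.children.push_back({"c", Node{"abc", {}}});
    doc.children.push_back({"b", b});

    std::vector<std::string> matches;
    std::string path = "$";
    search(doc, "abc", /*one_match=*/false, path, matches);
    for (const auto& m : matches) {
        std::cout << m << "\n"; // prints $.a and $.b.c
    }
}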
+ null_map->get_data()[i] = 1; + result_col->insert_data("", 0); + continue; + } + make_result_str(matches, result_col.get()); + } + auto result_col_nullable = + ColumnNullable::create(std::move(result_col), std::move(null_map)); + block.replace_by_position(result, std::move(result_col_nullable)); + return Status::OK(); + } + + static constexpr auto one = "one"; + static constexpr auto all = "all"; + +public: + static constexpr auto name = "json_search"; + static FunctionPtr create() { return std::make_shared(); } + + String get_name() const override { return name; } + bool is_variadic() const override { return false; } + size_t get_number_of_arguments() const override { return 3; } + + DataTypePtr get_return_type_impl(const DataTypes& arguments) const override { + return make_nullable(std::make_shared()); + } + + bool use_default_implementation_for_nulls() const override { return false; } + + Status open(FunctionContext* context, FunctionContext::FunctionStateScope scope) override { + if (scope != FunctionContext::THREAD_LOCAL) { + return Status::OK(); + } + if (context->is_col_constant(2)) { + std::shared_ptr state = std::make_shared(); + state->is_like_pattern = true; + const auto pattern_col = context->get_constant_col(2)->column_ptr; + const auto& pattern = pattern_col->get_data_at(0); + RETURN_IF_ERROR( + FunctionLike::construct_like_const_state(context, pattern, state, false)); + context->set_function_state(scope, state); + } + return Status::OK(); + } + + Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, + size_t result, size_t input_rows_count) const override { + // the json_doc, one_or_all, and search_str must be given. + // and we require the positions are static. + if (arguments.size() < 3) { + return Status::InvalidArgument("too few arguments for function {}", name); + } + if (arguments.size() > 3) { + return Status::NotSupported("escape and path params are not support now"); + } + + CheckNullFun json_null_check = always_not_null; + GetJsonStringRefFun get_json_fun; + ColumnPtr col_json; + bool json_is_const = false; + // prepare jsonb data column + std::tie(col_json, json_is_const) = + unpack_if_const(block.get_by_position(arguments[0]).column); + const ColumnString* col_json_string = check_and_get_column(col_json); + if (auto* nullable = check_and_get_column(col_json)) { + col_json_string = check_and_get_column(nullable->get_nested_column_ptr()); + } + + if (!col_json_string) { + return Status::RuntimeError("Illegal arg json {} should be ColumnString", + col_json->get_name()); + } + if (json_is_const) { + if (col_json->is_null_at(0)) { + json_null_check = always_null; + } else { + const auto& json_str = col_json_string->get_data_at(0); + get_json_fun = [json_str](size_t i) { return json_str; }; + } + } else { + json_null_check = [col_json](size_t i) { return col_json->is_null_at(i); }; + get_json_fun = [col_json_string](size_t i) { return col_json_string->get_data_at(i); }; + } + + // one_or_all + CheckNullFun one_null_check = always_not_null; + OneFun one_check = always_one; + ColumnPtr col_one; + bool one_is_const = false; + // prepare jsonb data column + std::tie(col_one, one_is_const) = + unpack_if_const(block.get_by_position(arguments[1]).column); + const ColumnString* col_one_string = check_and_get_column(col_one); + if (auto* nullable = check_and_get_column(col_one)) { + col_one_string = check_and_get_column(*nullable->get_nested_column_ptr()); + } + if (!col_one_string) { + return Status::RuntimeError("Illegal arg one {} 
should be ColumnString", + col_one->get_name()); + } + if (one_is_const) { + if (col_one->is_null_at(0)) { + one_null_check = always_null; + } else { + const auto& one_or_all = col_one_string->get_data_at(0); + std::string one_or_all_str = one_or_all.to_string(); + if (strcasecmp(one_or_all_str.c_str(), all) == 0) { + one_check = always_all; + } else if (strcasecmp(one_or_all_str.c_str(), one) == 0) { + // nothing + } else { + // an error occurs if the one_or_all argument is not 'one' nor 'all'. + return Status::InvalidArgument( + "the one_or_all argument {} is not 'one' not 'all'", one_or_all_str); + } + } + } else { + one_null_check = [col_one](size_t i) { return col_one->is_null_at(i); }; + one_check = [col_one_string](size_t i, bool* is_one) { + const auto& one_or_all = col_one_string->get_data_at(i); + std::string one_or_all_str = one_or_all.to_string(); + if (strcasecmp(one_or_all_str.c_str(), all) == 0) { + *is_one = false; + } else if (strcasecmp(one_or_all_str.c_str(), one) == 0) { + *is_one = true; + } else { + // an error occurs if the one_or_all argument is not 'one' nor 'all'. + return Status::InvalidArgument( + "the one_or_all argument {} is not 'one' not 'all'", one_or_all_str); + } + return Status::OK(); + }; + } + + // search_str + ColumnPtr col_search; + bool search_is_const = false; + std::tie(col_search, search_is_const) = + unpack_if_const(block.get_by_position(arguments[2]).column); + + const ColumnString* col_search_string = check_and_get_column(col_search); + if (auto* nullable = check_and_get_column(col_search)) { + col_search_string = + check_and_get_column(*nullable->get_nested_column_ptr()); + } + if (!col_search_string) { + return Status::RuntimeError("Illegal arg pattern {} should be ColumnString", + col_search->get_name()); + } + if (search_is_const) { + CheckNullFun search_null_check = always_not_null; + if (col_search->is_null_at(0)) { + search_null_check = always_null; + } + RETURN_IF_ERROR(execute_vector( + block, input_rows_count, json_null_check, get_json_fun, one_null_check, + one_check, search_null_check, col_search_string, context, result)); + } else { + CheckNullFun search_null_check = [col_search](size_t i) { + return col_search->is_null_at(i); + }; + RETURN_IF_ERROR(execute_vector( + block, input_rows_count, json_null_check, get_json_fun, one_null_check, + one_check, search_null_check, col_search_string, context, result)); + } + return Status::OK(); + } +}; + void register_function_jsonb(SimpleFunctionFactory& factory) { factory.register_function(FunctionJsonbParse::name); factory.register_alias(FunctionJsonbParse::name, FunctionJsonbParse::alias); @@ -1666,6 +2018,8 @@ void register_function_jsonb(SimpleFunctionFactory& factory) { factory.register_function>(); factory.register_function>(); factory.register_function>(); + + factory.register_function(); } } // namespace doris::vectorized diff --git a/be/src/vec/functions/function_string.cpp b/be/src/vec/functions/function_string.cpp index d0e12bb498430b..1a62c9daaf66f7 100644 --- a/be/src/vec/functions/function_string.cpp +++ b/be/src/vec/functions/function_string.cpp @@ -1046,6 +1046,7 @@ void register_function_string(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function(); factory.register_function(); + factory.register_function(); factory.register_function(); factory.register_function>(); factory.register_function>(); @@ -1057,6 +1058,7 @@ void register_function_string(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function>(); 
factory.register_function>(); + factory.register_function(); factory.register_function(); factory.register_function>(); factory.register_function>(); diff --git a/be/src/vec/functions/function_string.h b/be/src/vec/functions/function_string.h index 2e33dba3f332fe..0cb929779d7525 100644 --- a/be/src/vec/functions/function_string.h +++ b/be/src/vec/functions/function_string.h @@ -17,28 +17,25 @@ #pragma once -#include -#include -#include #include #include #include #include +#include #include #include #include #include +#include #include -#include #include #include #include -#include #include -#include #include #include +#include #include #include #include @@ -49,7 +46,6 @@ #include "gutil/strings/numbers.h" #include "gutil/strings/substitute.h" #include "runtime/decimalv2_value.h" -#include "runtime/runtime_state.h" #include "runtime/string_search.hpp" #include "util/sha.h" #include "util/string_util.h" @@ -64,17 +60,11 @@ #include "vec/common/memcpy_small.h" #include "vec/common/pod_array.h" #include "vec/common/pod_array_fwd.h" -#include "vec/common/string_utils/string_utils.h" -#include "vec/common/typeid_cast.h" #include "vec/core/block.h" #include "vec/core/column_numbers.h" #include "vec/core/column_with_type_and_name.h" -#include "vec/core/field.h" #include "vec/core/types.h" #include "vec/data_types/data_type.h" -#include "vec/functions/function_binary_arithmetic.h" -#include "vec/functions/round.h" -#include "vec/io/io_helper.h" #include "vec/utils/template_helpers.hpp" #ifndef USE_LIBCPP @@ -242,9 +232,11 @@ struct SubstringUtil { const char* str_data = (char*)chars.data() + offsets[i - 1]; int start_value = is_const ? start[0] : start[i]; int len_value = is_const ? len[0] : len[i]; - + // Unsigned numbers cannot be used here because start_value can be negative. + int char_len = simd::VStringFunctions::get_char_len(str_data, str_size); // return empty string if start > src.length - if (start_value > str_size || str_size == 0 || start_value == 0 || len_value <= 0) { + // Here, start_value is compared against the length of the character. 
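+            // Illustrative example (values are not from the original comment): for the two-character UTF-8
+            // string "中文", str_size is 6 bytes while char_len is 2, so a call like substr(str, 3, 1) is
+            // rejected here and returns an empty string instead of slicing past the last character.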
+ if (start_value > char_len || str_size == 0 || start_value == 0 || len_value <= 0) { StringOP::push_empty_string(i, res_chars, res_offsets); continue; } @@ -1007,6 +999,11 @@ class FunctionNotNullOrEmpty : public IFunction { class FunctionStringConcat : public IFunction { public: + struct ConcatState { + bool use_state = false; + std::string tail; + }; + static constexpr auto name = "concat"; static FunctionPtr create() { return std::make_shared(); } String get_name() const override { return name; } @@ -1017,6 +1014,40 @@ class FunctionStringConcat : public IFunction { return std::make_shared(); } + Status open(FunctionContext* context, FunctionContext::FunctionStateScope scope) override { + if (scope == FunctionContext::THREAD_LOCAL) { + return Status::OK(); + } + std::shared_ptr state = std::make_shared(); + + context->set_function_state(scope, state); + + state->use_state = true; + + // Optimize function calls like this: + // concat(col, "123", "abc", "456") -> tail = "123abc456" + for (size_t i = 1; i < context->get_num_args(); i++) { + const auto* column_string = context->get_constant_col(i); + if (column_string == nullptr) { + state->use_state = false; + return IFunction::open(context, scope); + } + auto string_value = column_string->column_ptr->get_data_at(0); + if (string_value.data == nullptr) { + // For concat(col, null), it is handled by default_implementation_for_nulls + state->use_state = false; + return IFunction::open(context, scope); + } + + state->tail.append(string_value.begin(), string_value.size); + } + + // The reserve is used here to allow the usage of memcpy_small_allow_read_write_overflow15 below. + state->tail.reserve(state->tail.size() + 16); + + return IFunction::open(context, scope); + } + Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, size_t result, size_t input_rows_count) const override { DCHECK_GE(arguments.size(), 1); @@ -1025,7 +1056,29 @@ class FunctionStringConcat : public IFunction { block.get_by_position(result).column = block.get_by_position(arguments[0]).column; return Status::OK(); } + auto* concat_state = reinterpret_cast( + context->get_function_state(FunctionContext::FRAGMENT_LOCAL)); + if (!concat_state) { + return Status::RuntimeError("function context for function '{}' must have ConcatState;", + get_name()); + } + if (concat_state->use_state) { + const auto& [col, is_const] = + unpack_if_const(block.get_by_position(arguments[0]).column); + const auto* col_str = assert_cast(col.get()); + if (is_const) { + return execute_const(concat_state, block, col_str, result, input_rows_count); + } else { + return execute_const(concat_state, block, col_str, result, input_rows_count); + } + } else { + return execute_vector(block, arguments, result, input_rows_count); + } + } + + Status execute_vector(Block& block, const ColumnNumbers& arguments, size_t result, + size_t input_rows_count) const { int argument_size = arguments.size(); std::vector argument_columns(argument_size); @@ -1048,18 +1101,12 @@ class FunctionStringConcat : public IFunction { auto& res_offset = res->get_offsets(); res_offset.resize(input_rows_count); - size_t res_reserve_size = 0; - // we could ignore null string column - // but it's not necessary to ignore it for (size_t i = 0; i < argument_size; ++i) { if (is_const_args[i]) { - res_reserve_size += - ((*offsets_list[i])[0] - (*offsets_list[i])[-1]) * input_rows_count; + res_reserve_size += (*offsets_list[i])[0] * input_rows_count; } else { - for (size_t j = 0; j < input_rows_count; ++j) { -
res_reserve_size += (*offsets_list[i])[j] - (*offsets_list[i])[j - 1]; - } + res_reserve_size += (*offsets_list[i])[input_rows_count - 1]; } } @@ -1067,24 +1114,65 @@ class FunctionStringConcat : public IFunction { res_data.resize(res_reserve_size); + auto* data = res_data.data(); + size_t dst_offset = 0; + for (size_t i = 0; i < input_rows_count; ++i) { - int current_length = 0; for (size_t j = 0; j < argument_size; ++j) { const auto& current_offsets = *offsets_list[j]; const auto& current_chars = *chars_list[j]; - auto idx = index_check_const(i, is_const_args[j]); - auto size = current_offsets[idx] - current_offsets[idx - 1]; + const auto size = current_offsets[idx] - current_offsets[idx - 1]; if (size > 0) { memcpy_small_allow_read_write_overflow15( - &res_data[res_offset[i - 1]] + current_length, - ¤t_chars[current_offsets[idx - 1]], size); - current_length += size; + data + dst_offset, current_chars.data() + current_offsets[idx - 1], + size); + dst_offset += size; } } - res_offset[i] = res_offset[i - 1] + current_length; + res_offset[i] = dst_offset; + } + + block.get_by_position(result).column = std::move(res); + return Status::OK(); + } + + template + Status execute_const(ConcatState* concat_state, Block& block, const ColumnString* col_str, + size_t result, size_t input_rows_count) const { + // using tail optimize + + auto res = ColumnString::create(); + auto& res_data = res->get_chars(); + auto& res_offset = res->get_offsets(); + res_offset.resize(input_rows_count); + + size_t res_reserve_size = 0; + if constexpr (is_const) { + res_reserve_size = col_str->get_offsets()[0] * input_rows_count; + } else { + res_reserve_size = col_str->get_offsets()[input_rows_count - 1]; } + res_reserve_size += concat_state->tail.size() * input_rows_count; + + ColumnString::check_chars_length(res_reserve_size, 0); + res_data.resize(res_reserve_size); + + const auto& tail = concat_state->tail; + auto* data = res_data.data(); + size_t dst_offset = 0; + for (size_t i = 0; i < input_rows_count; ++i) { + const auto idx = index_check_const(i); + StringRef str_val = col_str->get_data_at(idx); + // copy column + memcpy_small_allow_read_write_overflow15(data + dst_offset, str_val.data, str_val.size); + dst_offset += str_val.size; + // copy tail + memcpy_small_allow_read_write_overflow15(data + dst_offset, tail.data(), tail.size()); + dst_offset += tail.size(); + res_offset[i] = dst_offset; + } block.get_by_position(result).column = std::move(res); return Status::OK(); } @@ -1638,7 +1726,7 @@ class FunctionStringPad : public IFunction { const size_t pad_times = (len - str_char_size) / pad_char_size; const size_t pad_remainder_len = pad_index[(len - str_char_size) % pad_char_size]; const size_t new_capacity = str_len + size_t(pad_times + 1) * pad_len; - ColumnString::check_chars_length(new_capacity, 0); + ColumnString::check_chars_length(buffer_len + new_capacity, i); buffer.reserve(buffer_len + new_capacity); if constexpr (!Impl::is_lpad) { memcpy(buffer.data() + buffer_len, str_data, str_len); @@ -2088,18 +2176,8 @@ class FunctionSplitByString : public IFunction { continue; } if (delimiter_ref.size == 0) { - for (size_t str_pos = 0; str_pos < str_ref.size;) { - const size_t str_offset = str_pos; - const size_t old_size = column_string_chars.size(); - str_pos++; - const size_t new_size = old_size + 1; - column_string_chars.resize(new_size); - memcpy(column_string_chars.data() + old_size, str_ref.data + str_offset, 1); - (*dest_nested_null_map).push_back(false); - string_pos++; - dest_pos++; - 
column_string_offsets.push_back(string_pos); - } + split_empty_delimiter(str_ref, column_string_chars, column_string_offsets, + dest_nested_null_map, string_pos, dest_pos); } else { for (size_t str_pos = 0; str_pos <= str_ref.size;) { const size_t str_offset = str_pos; @@ -2154,18 +2232,8 @@ class FunctionSplitByString : public IFunction { continue; } if (delimiter_ref.size == 0) { - for (size_t str_pos = 0; str_pos < str_ref.size;) { - const size_t str_offset = str_pos; - const size_t old_size = column_string_chars.size(); - str_pos++; - const size_t new_size = old_size + 1; - column_string_chars.resize(new_size); - memcpy(column_string_chars.data() + old_size, str_ref.data + str_offset, 1); - (*dest_nested_null_map).push_back(false); - string_pos++; - dest_pos++; - column_string_offsets.push_back(string_pos); - } + split_empty_delimiter(str_ref, column_string_chars, column_string_offsets, + dest_nested_null_map, string_pos, dest_pos); } else { for (size_t str_pos = 0; str_pos <= str_ref.size;) { const size_t str_offset = str_pos; @@ -2206,18 +2274,8 @@ class FunctionSplitByString : public IFunction { const StringRef delimiter_ref = delimiter_col.get_data_at(i); if (delimiter_ref.size == 0) { - for (size_t str_pos = 0; str_pos < str_ref.size;) { - const size_t str_offset = str_pos; - const size_t old_size = column_string_chars.size(); - str_pos++; - const size_t new_size = old_size + 1; - column_string_chars.resize(new_size); - memcpy(column_string_chars.data() + old_size, str_ref.data + str_offset, 1); - (*dest_nested_null_map).push_back(false); - string_pos++; - dest_pos++; - column_string_offsets.push_back(string_pos); - } + split_empty_delimiter(str_ref, column_string_chars, column_string_offsets, + dest_nested_null_map, string_pos, dest_pos); } else { for (size_t str_pos = 0; str_pos <= str_ref.size;) { const size_t str_offset = str_pos; @@ -2251,6 +2309,47 @@ class FunctionSplitByString : public IFunction { } return pos - old_size; } + + void split_empty_delimiter(const StringRef& str_ref, ColumnString::Chars& column_string_chars, + ColumnString::Offsets& column_string_offsets, + NullMapType* dest_nested_null_map, ColumnArray::Offset64& string_pos, + ColumnArray::Offset64& dest_pos) const { + const size_t old_size = column_string_chars.size(); + const size_t new_size = old_size + str_ref.size; + column_string_chars.resize(new_size); + memcpy(column_string_chars.data() + old_size, str_ref.data, str_ref.size); + if (simd::VStringFunctions::is_ascii(str_ref)) { + const auto size = str_ref.size; + + dest_nested_null_map->resize_fill(dest_nested_null_map->size() + size, false); + + const auto old_size = column_string_offsets.size(); + const auto new_size = old_size + size; + column_string_offsets.resize(new_size); + std::iota(column_string_offsets.data() + old_size, + column_string_offsets.data() + new_size, string_pos + 1); + + string_pos += size; + dest_pos += size; + // The above code is equivalent to the code in the following comment. 
+ // for (size_t i = 0; i < str_ref.size; i++) { + // string_pos++; + // column_string_offsets.push_back(string_pos); + // (*dest_nested_null_map).push_back(false); + // dest_pos++; + // } + } else { + for (size_t i = 0, utf8_char_len = 0; i < str_ref.size; i += utf8_char_len) { + utf8_char_len = UTF8_BYTE_LENGTH[(unsigned char)str_ref.data[i]]; + + string_pos += utf8_char_len; + column_string_offsets.push_back(string_pos); + + (*dest_nested_null_map).push_back(false); + dest_pos++; + } + } + } }; struct SM3Sum { @@ -2601,43 +2700,60 @@ class FunctionUrlDecode : public IFunction { static FunctionPtr create() { return std::make_shared(); } String get_name() const override { return name; } size_t get_number_of_arguments() const override { return 1; } - bool is_variadic() const override { return false; } - DataTypePtr get_return_type_impl(const DataTypes& arguments) const override { return std::make_shared(); } - Status execute_impl(FunctionContext* context, Block& block, - - const ColumnNumbers& arguments, size_t result, - size_t input_rows_count) const override { + Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, + size_t result, size_t input_rows_count) const override { auto res = ColumnString::create(); - auto& res_offsets = res->get_offsets(); - auto& res_chars = res->get_chars(); - res_offsets.resize(input_rows_count); + res->get_offsets().reserve(input_rows_count); - ColumnPtr argument_column = - block.get_by_position(arguments[0]).column->convert_to_full_column_if_const(); - const auto* url_col = check_and_get_column(argument_column.get()); + const auto* url_col = + assert_cast(block.get_by_position(arguments[0]).column.get()); - if (!url_col) { - return Status::InternalError("Not supported input argument type"); + std::string decoded_url; + for (size_t i = 0; i < input_rows_count; ++i) { + auto url = url_col->get_data_at(i); + if (!url_decode(url.to_string(), &decoded_url)) { + return Status::InternalError("Decode url failed"); + } + res->insert_data(decoded_url.data(), decoded_url.size()); + decoded_url.clear(); } - std::string decoded_url; + block.get_by_position(result).column = std::move(res); + return Status::OK(); + } +}; - for (size_t i = 0; i < input_rows_count; ++i) { - auto source = url_col->get_data_at(i); - StringRef url_val(const_cast(source.data), source.size); +class FunctionUrlEncode : public IFunction { +public: + static constexpr auto name = "url_encode"; + static FunctionPtr create() { return std::make_shared(); } + String get_name() const override { return name; } + size_t get_number_of_arguments() const override { return 1; } + DataTypePtr get_return_type_impl(const DataTypes& arguments) const override { + return std::make_shared(); + } + + Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, + size_t result, size_t input_rows_count) const override { + auto res = ColumnString::create(); + res->get_offsets().reserve(input_rows_count); - url_decode(url_val.to_string(), &decoded_url); + const auto* url_col = + assert_cast(block.get_by_position(arguments[0]).column.get()); - StringOP::push_value_string(decoded_url, i, res_chars, res_offsets); - decoded_url.clear(); + std::string encoded_url; + for (size_t i = 0; i < input_rows_count; ++i) { + auto url = url_col->get_data_at(i); + url_encode(url.to_string_view(), &encoded_url); + res->insert_data(encoded_url.data(), encoded_url.size()); + encoded_url.clear(); } block.get_by_position(result).column = std::move(res); - return Status::OK(); 
} }; @@ -3176,26 +3292,39 @@ class FunctionReplace : public IFunction { size_t result, size_t input_rows_count) const override { // We need a local variable to hold a reference to the converted column. // So that the converted column will not be released before we use it. - auto col_origin = - block.get_by_position(arguments[0]).column->convert_to_full_column_if_const(); - auto col_origin_str = assert_cast(col_origin.get()); - auto col_old = - block.get_by_position(arguments[1]).column->convert_to_full_column_if_const(); - auto col_old_str = assert_cast(col_old.get()); - auto col_new = - block.get_by_position(arguments[2]).column->convert_to_full_column_if_const(); - auto col_new_str = assert_cast(col_new.get()); + ColumnPtr col[3]; + bool col_const[3]; + for (size_t i = 0; i < 3; ++i) { + std::tie(col[i], col_const[i]) = + unpack_if_const(block.get_by_position(arguments[i]).column); + } + + const auto* col_origin_str = assert_cast(col[0].get()); + const auto* col_old_str = assert_cast(col[1].get()); + const auto* col_new_str = assert_cast(col[2].get()); ColumnString::MutablePtr col_res = ColumnString::create(); - for (int i = 0; i < input_rows_count; ++i) { - StringRef origin_str = col_origin_str->get_data_at(i); - StringRef old_str = col_old_str->get_data_at(i); - StringRef new_str = col_new_str->get_data_at(i); - std::string result = replace(origin_str.to_string(), old_str.to_string_view(), - new_str.to_string_view()); - col_res->insert_data(result.data(), result.length()); - } + std::visit( + [&](auto origin_str_const, auto old_str_const, auto new_str_const) { + for (int i = 0; i < input_rows_count; ++i) { + StringRef origin_str = + col_origin_str->get_data_at(index_check_const(i)); + StringRef old_str = + col_old_str->get_data_at(index_check_const(i)); + StringRef new_str = + col_new_str->get_data_at(index_check_const(i)); + + std::string result = + replace(origin_str.to_string(), old_str.to_string_view(), + new_str.to_string_view()); + + col_res->insert_data(result.data(), result.length()); + } + }, + vectorized::make_bool_variant(col_const[0]), + vectorized::make_bool_variant(col_const[1]), + vectorized::make_bool_variant(col_const[2])); block.replace_by_position(result, std::move(col_res)); return Status::OK(); @@ -3212,16 +3341,29 @@ class FunctionReplace : public IFunction { if (new_str.empty()) { return str; } - std::string result; - ColumnString::check_chars_length( - str.length() * (new_str.length() + 1) + new_str.length(), 0); - result.reserve(str.length() * (new_str.length() + 1) + new_str.length()); - for (char c : str) { + if (simd::VStringFunctions::is_ascii({str.data(), str.size()})) { + std::string result; + ColumnString::check_chars_length( + str.length() * (new_str.length() + 1) + new_str.length(), 0); + result.reserve(str.length() * (new_str.length() + 1) + new_str.length()); + for (char c : str) { + result += new_str; + result += c; + } + result += new_str; + return result; + } else { + std::string result; + result.reserve(str.length() * (new_str.length() + 1) + new_str.length()); + for (size_t i = 0, utf8_char_len = 0; i < str.size(); i += utf8_char_len) { + utf8_char_len = UTF8_BYTE_LENGTH[(unsigned char)str[i]]; + result += new_str; + result.append(&str[i], utf8_char_len); + } result += new_str; - result += c; + ColumnString::check_chars_length(result.size(), 0); + return result; } - result += new_str; - return result; } } else { std::string::size_type pos = 0; @@ -3279,8 +3421,6 @@ class FunctionSubReplace : public IFunction { return 
get_variadic_argument_types_impl().size(); } - bool use_default_implementation_for_nulls() const override { return false; } - Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, size_t result, size_t input_rows_count) const override { return Impl::execute_impl(context, block, arguments, result, input_rows_count); @@ -3291,59 +3431,116 @@ struct SubReplaceImpl { static Status replace_execute(Block& block, const ColumnNumbers& arguments, size_t result, size_t input_rows_count) { auto res_column = ColumnString::create(); - auto result_column = assert_cast(res_column.get()); + auto* result_column = assert_cast(res_column.get()); auto args_null_map = ColumnUInt8::create(input_rows_count, 0); ColumnPtr argument_columns[4]; + bool col_const[4]; for (int i = 0; i < 4; ++i) { - argument_columns[i] = - block.get_by_position(arguments[i]).column->convert_to_full_column_if_const(); - if (auto* nullable = check_and_get_column(*argument_columns[i])) { - // Danger: Here must dispose the null map data first! Because - // argument_columns[i]=nullable->get_nested_column_ptr(); will release the mem - // of column nullable mem of null map - VectorizedUtils::update_null_map(args_null_map->get_data(), - nullable->get_null_map_data()); - argument_columns[i] = nullable->get_nested_column_ptr(); - } + std::tie(argument_columns[i], col_const[i]) = + unpack_if_const(block.get_by_position(arguments[i]).column); } + const auto* data_column = assert_cast(argument_columns[0].get()); + const auto* mask_column = assert_cast(argument_columns[1].get()); + const auto* start_column = + assert_cast*>(argument_columns[2].get()); + const auto* length_column = + assert_cast*>(argument_columns[3].get()); - auto data_column = assert_cast(argument_columns[0].get()); - auto mask_column = assert_cast(argument_columns[1].get()); - auto start_column = assert_cast*>(argument_columns[2].get()); - auto length_column = assert_cast*>(argument_columns[3].get()); - - vector(data_column, mask_column, start_column->get_data(), length_column->get_data(), - args_null_map->get_data(), result_column, input_rows_count); - + std::visit( + [&](auto origin_str_const, auto new_str_const, auto start_const, auto len_const) { + if (simd::VStringFunctions::is_ascii( + StringRef {data_column->get_chars().data(), data_column->size()})) { + vector_ascii( + data_column, mask_column, start_column->get_data(), + length_column->get_data(), args_null_map->get_data(), result_column, + input_rows_count); + } else { + vector_utf8( + data_column, mask_column, start_column->get_data(), + length_column->get_data(), args_null_map->get_data(), result_column, + input_rows_count); + } + }, + vectorized::make_bool_variant(col_const[0]), + vectorized::make_bool_variant(col_const[1]), + vectorized::make_bool_variant(col_const[2]), + vectorized::make_bool_variant(col_const[3])); block.get_by_position(result).column = ColumnNullable::create(std::move(res_column), std::move(args_null_map)); return Status::OK(); } private: - static void vector(const ColumnString* data_column, const ColumnString* mask_column, - const PaddedPODArray& start, const PaddedPODArray& length, - NullMap& args_null_map, ColumnString* result_column, - size_t input_rows_count) { + template + static void vector_ascii(const ColumnString* data_column, const ColumnString* mask_column, + const PaddedPODArray& args_start, + const PaddedPODArray& args_length, NullMap& args_null_map, + ColumnString* result_column, size_t input_rows_count) { ColumnString::Chars& res_chars = 
result_column->get_chars(); ColumnString::Offsets& res_offsets = result_column->get_offsets(); for (size_t row = 0; row < input_rows_count; ++row) { - StringRef origin_str = data_column->get_data_at(row); - StringRef new_str = mask_column->get_data_at(row); - size_t origin_str_len = origin_str.size; + StringRef origin_str = + data_column->get_data_at(index_check_const(row)); + StringRef new_str = mask_column->get_data_at(index_check_const(row)); + const auto start = args_start[index_check_const(row)]; + const auto length = args_length[index_check_const(row)]; + const size_t origin_str_len = origin_str.size; //input is null, start < 0, len < 0, str_size <= start. return NULL - if (args_null_map[row] || start[row] < 0 || length[row] < 0 || - origin_str_len <= start[row]) { + if (args_null_map[row] || start < 0 || length < 0 || origin_str_len <= start) { res_offsets.push_back(res_chars.size()); args_null_map[row] = 1; } else { std::string_view replace_str = new_str.to_string_view(); std::string result = origin_str.to_string(); - result.replace(start[row], length[row], replace_str); + result.replace(start, length, replace_str); result_column->insert_data(result.data(), result.length()); } } } + + template + static void vector_utf8(const ColumnString* data_column, const ColumnString* mask_column, + const PaddedPODArray& args_start, + const PaddedPODArray& args_length, NullMap& args_null_map, + ColumnString* result_column, size_t input_rows_count) { + ColumnString::Chars& res_chars = result_column->get_chars(); + ColumnString::Offsets& res_offsets = result_column->get_offsets(); + + for (size_t row = 0; row < input_rows_count; ++row) { + StringRef origin_str = + data_column->get_data_at(index_check_const(row)); + StringRef new_str = mask_column->get_data_at(index_check_const(row)); + const auto start = args_start[index_check_const(row)]; + const auto length = args_length[index_check_const(row)]; + //input is null, start < 0, len < 0 return NULL + if (args_null_map[row] || start < 0 || length < 0) { + res_offsets.push_back(res_chars.size()); + args_null_map[row] = 1; + continue; + } + + const auto [start_byte_len, start_char_len] = + simd::VStringFunctions::iterate_utf8_with_limit_length(origin_str.begin(), + origin_str.end(), start); + + // start >= orgin.size + DCHECK(start_char_len <= start); + if (start_byte_len == origin_str.size) { + res_offsets.push_back(res_chars.size()); + args_null_map[row] = 1; + continue; + } + + auto [end_byte_len, end_char_len] = + simd::VStringFunctions::iterate_utf8_with_limit_length( + origin_str.begin() + start_byte_len, origin_str.end(), length); + DCHECK(end_char_len <= length); + std::string_view replace_str = new_str.to_string_view(); + std::string result = origin_str.to_string(); + result.replace(start_byte_len, end_byte_len, replace_str); + result_column->insert_data(result.data(), result.length()); + } + } }; struct SubReplaceThreeImpl { @@ -3360,13 +3557,14 @@ struct SubReplaceThreeImpl { auto str_col = block.get_by_position(arguments[1]).column->convert_to_full_column_if_const(); - if (auto* nullable = check_and_get_column(*str_col)) { + if (const auto* nullable = check_and_get_column(*str_col)) { str_col = nullable->get_nested_column_ptr(); } - auto& str_offset = assert_cast(str_col.get())->get_offsets(); - + const auto* str_column = assert_cast(str_col.get()); + // use utf8 len for (int i = 0; i < input_rows_count; ++i) { - strlen_data[i] = str_offset[i] - str_offset[i - 1]; + StringRef str_ref = str_column->get_data_at(i); + strlen_data[i] = 
simd::VStringFunctions::get_char_len(str_ref.data, str_ref.size); } block.insert({std::move(params), std::make_shared(), "strlen"}); @@ -4049,4 +4247,172 @@ class FunctionNgramSearch : public IFunction { } }; +class FunctionTranslate : public IFunction { +public: + static constexpr auto name = "translate"; + static FunctionPtr create() { return std::make_shared(); } + String get_name() const override { return name; } + size_t get_number_of_arguments() const override { return 3; } + + DataTypePtr get_return_type_impl(const DataTypes& arguments) const override { + return std::make_shared(); + }; + + DataTypes get_variadic_argument_types_impl() const override { + return {std::make_shared(), std::make_shared(), + std::make_shared()}; + } + + Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, + size_t result, size_t input_rows_count) const override { + CHECK_EQ(arguments.size(), 3); + auto col_res = ColumnString::create(); + bool col_const[3]; + ColumnPtr argument_columns[3]; + for (int i = 0; i < 3; ++i) { + col_const[i] = is_column_const(*block.get_by_position(arguments[i]).column); + } + argument_columns[0] = col_const[0] ? static_cast( + *block.get_by_position(arguments[0]).column) + .convert_to_full_column() + : block.get_by_position(arguments[0]).column; + default_preprocess_parameter_columns(argument_columns, col_const, {1, 2}, block, arguments); + + const auto* col_source = assert_cast(argument_columns[0].get()); + const auto* col_from = assert_cast(argument_columns[1].get()); + const auto* col_to = assert_cast(argument_columns[2].get()); + + bool is_ascii = simd::VStringFunctions::is_ascii( + {col_source->get_chars().data(), col_source->get_chars().size()}) && + simd::VStringFunctions::is_ascii( + {col_from->get_chars().data(), col_from->get_chars().size()}) && + simd::VStringFunctions::is_ascii( + {col_to->get_chars().data(), col_to->get_chars().size()}); + auto impl_vectors = impl_vectors_utf8; + if (col_const[1] && col_const[2] && is_ascii) { + impl_vectors = impl_vectors_ascii; + } else if (col_const[1] && col_const[2]) { + impl_vectors = impl_vectors_utf8; + } else if (is_ascii) { + impl_vectors = impl_vectors_ascii; + } + impl_vectors(col_source, col_from, col_to, col_res); + block.get_by_position(result).column = std::move(col_res); + return Status::OK(); + } + +private: + template + static void impl_vectors_ascii(const ColumnString* col_source, const ColumnString* col_from, + const ColumnString* col_to, ColumnString* col_res) { + col_res->get_chars().reserve(col_source->get_chars().size()); + col_res->get_offsets().reserve(col_source->get_offsets().size()); + std::unordered_map translate_map; + if (IsConst) { + const auto& from_str = col_from->get_data_at(0); + const auto& to_str = col_to->get_data_at(0); + translate_map = + build_translate_map_ascii(from_str.to_string_view(), to_str.to_string_view()); + } + for (size_t i = 0; i < col_source->size(); ++i) { + const auto& source_str = col_source->get_data_at(i); + if (!IsConst) { + const auto& from_str = col_from->get_data_at(i); + const auto& to_str = col_to->get_data_at(i); + translate_map = build_translate_map_ascii(from_str.to_string_view(), + to_str.to_string_view()); + } + auto translated_str = translate_ascii(source_str.to_string_view(), translate_map); + col_res->insert_data(translated_str.data(), translated_str.size()); + } + } + + static std::unordered_map build_translate_map_ascii( + const std::string_view& from_str, const std::string_view& to_str) { + std::unordered_map 
translate_map; + for (size_t i = 0; i < from_str.size(); ++i) { + if (translate_map.find(from_str[i]) == translate_map.end()) { + translate_map[from_str[i]] = i < to_str.size() ? to_str[i] : 0; + } + } + return translate_map; + } + + static std::string translate_ascii(const std::string_view& source_str, + std::unordered_map& translate_map) { + std::string result; + result.reserve(source_str.size()); + for (auto const& c : source_str) { + if (translate_map.find(c) != translate_map.end()) { + if (translate_map[c]) { + result.push_back(translate_map[c]); + } + } else { + result.push_back(c); + } + } + return result; + } + + template + static void impl_vectors_utf8(const ColumnString* col_source, const ColumnString* col_from, + const ColumnString* col_to, ColumnString* col_res) { + col_res->get_chars().reserve(col_source->get_chars().size()); + col_res->get_offsets().reserve(col_source->get_offsets().size()); + std::unordered_map translate_map; + if (IsConst) { + const auto& from_str = col_from->get_data_at(0); + const auto& to_str = col_to->get_data_at(0); + translate_map = + build_translate_map_utf8(from_str.to_string_view(), to_str.to_string_view()); + } + for (size_t i = 0; i < col_source->size(); ++i) { + const auto& source_str = col_source->get_data_at(i); + if (!IsConst) { + const auto& from_str = col_from->get_data_at(i); + const auto& to_str = col_to->get_data_at(i); + translate_map = build_translate_map_utf8(from_str.to_string_view(), + to_str.to_string_view()); + } + auto translated_str = translate_utf8(source_str.to_string_view(), translate_map); + col_res->insert_data(translated_str.data(), translated_str.size()); + } + } + + static std::unordered_map build_translate_map_utf8( + const std::string_view& from_str, const std::string_view& to_str) { + std::unordered_map translate_map; + for (size_t i = 0, from_char_size = 0, j = 0, to_char_size = 0; i < from_str.size(); + i += from_char_size, j += to_char_size) { + from_char_size = get_utf8_byte_length(from_str[i]); + to_char_size = j < to_str.size() ? get_utf8_byte_length(to_str[j]) : 0; + auto from_char = from_str.substr(i, from_char_size); + if (translate_map.find(from_char) == translate_map.end()) { + translate_map[from_char] = + j < to_str.size() ? 
to_str.substr(j, to_char_size) : std::string_view(); + } + } + return translate_map; + } + + static std::string translate_utf8( + const std::string_view& source_str, + std::unordered_map& translate_map) { + std::string result; + result.reserve(source_str.size()); + for (size_t i = 0, char_size = 0; i < source_str.size(); i += char_size) { + char_size = get_utf8_byte_length(source_str[i]); + auto c = source_str.substr(i, char_size); + if (translate_map.find(c) != translate_map.end()) { + if (!translate_map[c].empty()) { + result.append(translate_map[c]); + } + } else { + result.append(c); + } + } + return result; + } +}; + } // namespace doris::vectorized diff --git a/be/src/vec/functions/function_timestamp.cpp b/be/src/vec/functions/function_timestamp.cpp index 1edb014e99a9ee..428f8c2893fc57 100644 --- a/be/src/vec/functions/function_timestamp.cpp +++ b/be/src/vec/functions/function_timestamp.cpp @@ -77,6 +77,8 @@ struct StrToDate { static bool is_variadic() { return false; } + static size_t get_number_of_arguments() { return 2; } + static DataTypes get_variadic_argument_types() { return {std::make_shared(), std::make_shared()}; } @@ -245,6 +247,8 @@ struct MakeDateImpl { static bool is_variadic() { return false; } + static size_t get_number_of_arguments() { return 2; } + static DataTypes get_variadic_argument_types() { return {}; } static DataTypePtr get_return_type_impl(const DataTypes& arguments) { @@ -409,6 +413,8 @@ struct DateTrunc { static bool is_variadic() { return true; } + static size_t get_number_of_arguments() { return 2; } + static DataTypes get_variadic_argument_types() { return {std::make_shared(), std::make_shared()}; } @@ -1150,7 +1156,7 @@ class FunctionOtherTypesToDateType : public IFunction { String get_name() const override { return name; } - size_t get_number_of_arguments() const override { return 2; } + size_t get_number_of_arguments() const override { return Impl::get_number_of_arguments(); } bool is_variadic() const override { return Impl::is_variadic(); } @@ -1182,6 +1188,240 @@ class FunctionOtherTypesToDateType : public IFunction { } }; +struct FromIso8601DateV2 { + static constexpr auto name = "from_iso8601_date"; + + static size_t get_number_of_arguments() { return 1; } + + static bool is_variadic() { return false; } + + static DataTypes get_variadic_argument_types() { return {std::make_shared()}; } + + static DataTypePtr get_return_type_impl(const DataTypes& arguments) { + return make_nullable(std::make_shared()); + } + + static Status execute(FunctionContext* context, Block& block, const ColumnNumbers& arguments, + size_t result, size_t input_rows_count) { + const auto* src_column_ptr = block.get_by_position(arguments[0]).column.get(); + + auto null_map = ColumnUInt8::create(input_rows_count, 0); + + ColumnDateV2::MutablePtr res = ColumnDateV2::create(input_rows_count); + auto& result_data = res->get_data(); + + static const std::tuple, int, std::string> ISO_STRING_FORMAT[] = { + {{ + 8, + }, + 1, + "%04d%02d%02d"}, //YYYYMMDD + {{4, -1, 2, -1, 2}, 1, "%04d-%02d-%02d"}, //YYYY-MM-DD + {{4, -1, 2}, 2, "%04d-%02d"}, //YYYY-MM + { + { + 4, + }, + 3, + "%04d", + }, //YYYY + { + {4, -1, 3}, + 4, + "%04d-%03d", + }, //YYYY-DDD + { + { + 7, + }, + 4, + "%04d%03d", + }, //YYYYDDD + { + {4, -1, -2, 2}, + 5, + "%04d-W%02d", + }, //YYYY-Www + { + {4, -2, 2}, + 5, + "%04dW%02d", + }, //YYYYWww + { + {4, -1, -2, 2, -1, 1}, + 6, + "%04d-W%02d-%1d", + }, //YYYY-Www-D + { + {4, -2, 3}, + 6, + "%04dW%02d%1d", + }, //YYYYWwwD + }; + + for (size_t i = 0; i < input_rows_count; 
++i) { + int year, month, day, week, day_of_year; + int weekday = 1; // YYYYWww YYYY-Www default D = 1 + auto src_string = src_column_ptr->get_data_at(i).to_string_view(); + + int iso_string_format_value = 0; + + vector src_string_values; + src_string_values.reserve(10); + + //The maximum length of the current iso8601 format is 10. + if (src_string.size() <= 10) { + // The calculation string corresponds to the iso8601 format. + // The integer represents the number of consecutive numbers. + // -1 represent char '-'. + // -2 represent char 'W'. + // The calculated vector `src_string_values` will be compared with `ISO_STRING_FORMAT[]` later. + for (int idx = 0; idx < src_string.size();) { + char current = src_string[idx]; + if (current == '-') { + src_string_values.emplace_back(-1); + idx++; + continue; + } else if (current == 'W') { + src_string_values.emplace_back(-2); + idx++; + continue; + } else if (!isdigit(current)) { + iso_string_format_value = -1; + break; + } + int currLen = 0; + for (; idx < src_string.size() && isdigit(src_string[idx]); ++idx) { + ++currLen; + } + src_string_values.emplace_back(currLen); + } + } else { + iso_string_format_value = -1; + } + + std::string_view iso_format_string; + if (iso_string_format_value != -1) { + for (const auto& j : ISO_STRING_FORMAT) { + const auto& v = std::get<0>(j); + if (v == src_string_values) { + iso_string_format_value = std::get<1>(j); + iso_format_string = std::get<2>(j); + break; + } + } + } + + auto& ts_value = *reinterpret_cast*>(&result_data[i]); + if (iso_string_format_value == 1) { + if (sscanf(src_string.data(), iso_format_string.data(), &year, &month, &day) != 3) + [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + + if (!(ts_value.template set_time_unit(year) && + ts_value.template set_time_unit(month) && + ts_value.template set_time_unit(day))) [[unlikely]] { + null_map->get_data().data()[i] = true; + } + } else if (iso_string_format_value == 2) { + if (sscanf(src_string.data(), iso_format_string.data(), &year, &month) != 2) + [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + + if (!(ts_value.template set_time_unit(year) && + ts_value.template set_time_unit(month))) [[unlikely]] { + null_map->get_data().data()[i] = true; + } + ts_value.template unchecked_set_time_unit(1); + } else if (iso_string_format_value == 3) { + if (sscanf(src_string.data(), iso_format_string.data(), &year) != 1) [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + + if (!ts_value.template set_time_unit(year)) [[unlikely]] { + null_map->get_data().data()[i] = true; + } + ts_value.template unchecked_set_time_unit(1); + ts_value.template unchecked_set_time_unit(1); + + } else if (iso_string_format_value == 5 || iso_string_format_value == 6) { + if (iso_string_format_value == 5) { + if (sscanf(src_string.data(), iso_format_string.data(), &year, &week) != 2) + [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + } else { + if (sscanf(src_string.data(), iso_format_string.data(), &year, &week, + &weekday) != 3) [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + } + // weekday [1,7] week [1,53] + if (weekday < 1 || weekday > 7 || week < 1 || week > 53) [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + + auto first_day_of_week = getFirstDayOfISOWeek(year); + ts_value.template unchecked_set_time_unit( + first_day_of_week.year().operator int()); + ts_value.template unchecked_set_time_unit( + first_day_of_week.month().operator 
unsigned int()); + ts_value.template unchecked_set_time_unit( + first_day_of_week.day().operator unsigned int()); + + auto day_diff = (week - 1) * 7 + weekday - 1; + TimeInterval interval(DAY, day_diff, false); + ts_value.date_add_interval(interval); + } else if (iso_string_format_value == 4) { + if (sscanf(src_string.data(), iso_format_string.data(), &year, &day_of_year) != 2) + [[unlikely]] { + null_map->get_data().data()[i] = true; + continue; + } + + if (is_leap(year)) { + if (day_of_year < 0 || day_of_year > 366) [[unlikely]] { + null_map->get_data().data()[i] = true; + } + } else { + if (day_of_year < 0 || day_of_year > 365) [[unlikely]] { + null_map->get_data().data()[i] = true; + } + } + ts_value.template unchecked_set_time_unit(year); + ts_value.template unchecked_set_time_unit(1); + ts_value.template unchecked_set_time_unit(1); + TimeInterval interval(DAY, day_of_year - 1, false); + ts_value.template date_add_interval(interval); + } else { + null_map->get_data().data()[i] = true; + } + } + block.get_by_position(result).column = + ColumnNullable::create(std::move(res), std::move(null_map)); + return Status::OK(); + } + +private: + //Get the date corresponding to Monday of the first week of the year according to the ISO8601 standard. + static std::chrono::year_month_day getFirstDayOfISOWeek(int year) { + using namespace std::chrono; + auto jan4 = year_month_day {std::chrono::year(year) / January / 4}; + auto jan4_sys_days = sys_days {jan4}; + auto weekday_of_jan4 = weekday {jan4_sys_days}; + auto first_day_of_week = jan4_sys_days - days {(weekday_of_jan4.iso_encoding() - 1)}; + return year_month_day {floor(first_day_of_week)}; + } +}; + using FunctionStrToDate = FunctionOtherTypesToDateType>; using FunctionStrToDatetime = FunctionOtherTypesToDateType>; using FunctionStrToDateV2 = FunctionOtherTypesToDateType>; @@ -1191,6 +1431,7 @@ using FunctionDateTruncDate = FunctionOtherTypesToDateType>; using FunctionDateTruncDatetime = FunctionOtherTypesToDateType>; using FunctionDateTruncDatetimeV2 = FunctionOtherTypesToDateType>; +using FunctionFromIso8601DateV2 = FunctionOtherTypesToDateType; void register_function_timestamp(SimpleFunctionFactory& factory) { factory.register_function(); @@ -1203,6 +1444,7 @@ void register_function_timestamp(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function(); factory.register_function(); + factory.register_function(); factory.register_function>(); factory.register_function>>(); diff --git a/be/src/vec/functions/function_tokenize.cpp b/be/src/vec/functions/function_tokenize.cpp index e7dc2debe62ad8..be0eb5dddc960d 100644 --- a/be/src/vec/functions/function_tokenize.cpp +++ b/be/src/vec/functions/function_tokenize.cpp @@ -26,6 +26,7 @@ #include "CLucene/StdHeader.h" #include "CLucene/config/repl_wchar.h" #include "olap/inverted_index_parser.h" +#include "olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h" #include "olap/rowset/segment_v2/inverted_index_reader.h" #include "vec/columns/column.h" #include "vec/common/string_ref.h" @@ -79,13 +80,14 @@ void FunctionTokenize::_do_tokenize(const ColumnString& src_column_string, dest_offsets.push_back(dest_pos); continue; } - auto reader = doris::segment_v2::InvertedIndexReader::create_reader( - &inverted_index_ctx, tokenize_str.to_string()); - - std::vector query_tokens; - doris::segment_v2::InvertedIndexReader::get_analyse_result( - query_tokens, reader.get(), inverted_index_ctx.analyzer, "tokenize", - doris::segment_v2::InvertedIndexQueryType::MATCH_PHRASE_QUERY); + 
auto reader = doris::segment_v2::inverted_index::InvertedIndexAnalyzer::create_reader( + inverted_index_ctx.char_filter_map); + reader->init(tokenize_str.data, tokenize_str.size, true); + + std::vector query_tokens = + doris::segment_v2::inverted_index::InvertedIndexAnalyzer::get_analyse_result( + reader.get(), inverted_index_ctx.analyzer, "tokenize", + doris::segment_v2::InvertedIndexQueryType::MATCH_PHRASE_QUERY); for (auto token : query_tokens) { const size_t old_size = column_string_chars.size(); const size_t split_part_size = token.length(); @@ -143,18 +145,18 @@ Status FunctionTokenize::execute_impl(FunctionContext* /*context*/, Block& block inverted_index_ctx.parser_mode = get_parser_mode_string_from_properties(properties); inverted_index_ctx.char_filter_map = get_parser_char_filter_map_from_properties(properties); + inverted_index_ctx.lower_case = get_parser_lowercase_from_properties(properties); + inverted_index_ctx.stop_words = get_parser_stopwords_from_properties(properties); std::unique_ptr analyzer; try { - analyzer = doris::segment_v2::InvertedIndexReader::create_analyzer( - &inverted_index_ctx); + analyzer = + doris::segment_v2::inverted_index::InvertedIndexAnalyzer::create_analyzer( + &inverted_index_ctx); } catch (CLuceneError& e) { return Status::Error( "inverted index create analyzer failed: {}", e.what()); } - doris::segment_v2::FullTextIndexReader::setup_analyzer_lowercase(analyzer, properties); - doris::segment_v2::FullTextIndexReader::setup_analyzer_use_stopwords(analyzer, - properties); inverted_index_ctx.analyzer = analyzer.get(); _do_tokenize(*col_left, inverted_index_ctx, *dest_nested_column, dest_offsets, diff --git a/be/src/vec/functions/functions_multi_string_search.cpp b/be/src/vec/functions/functions_multi_string_search.cpp index f7a1b8d7a90057..7736a1a039b8ab 100644 --- a/be/src/vec/functions/functions_multi_string_search.cpp +++ b/be/src/vec/functions/functions_multi_string_search.cpp @@ -20,10 +20,10 @@ #include #include -#include #include #include +#include #include #include #include @@ -80,42 +80,30 @@ class FunctionsMultiStringSearch : public IFunction { auto haystack_column = block.get_by_position(arguments[0]).column; auto needles_column = block.get_by_position(arguments[1]).column; - bool haystack_nullable = false; - bool needles_nullable = false; - - if (haystack_column->is_nullable()) { - haystack_nullable = true; - } - - if (needles_column->is_nullable()) { - needles_nullable = true; - } - auto haystack_ptr = remove_nullable(haystack_column); auto needles_ptr = remove_nullable(needles_column); - const ColumnString* col_haystack_vector = - check_and_get_column(&*haystack_ptr); + const auto* col_haystack_vector = check_and_get_column(&*haystack_ptr); const ColumnConst* col_haystack_const = check_and_get_column_const(&*haystack_ptr); - const ColumnArray* col_needles_vector = - check_and_get_column(needles_ptr.get()); + const auto* col_needles_vector = check_and_get_column(needles_ptr.get()); const ColumnConst* col_needles_const = check_and_get_column_const(needles_ptr.get()); - if (!col_needles_const && !col_needles_vector) + if (!col_needles_const && !col_needles_vector) { return Status::InvalidArgument( "function '{}' encountered unsupported needles column, found {}", name, needles_column->get_name()); + } - if (col_haystack_const && col_needles_vector) + if (col_haystack_const && col_needles_vector) { return Status::InvalidArgument( "function '{}' doesn't support search with non-constant needles " "in constant haystack", name); + } - using 
ResultType = typename Impl::ResultType; auto col_res = ColumnVector::create(); auto col_offsets = ColumnArray::ColumnOffsets::create(); @@ -140,25 +128,8 @@ class FunctionsMultiStringSearch : public IFunction { return status; } - if (haystack_nullable) { - auto column_nullable = check_and_get_column(haystack_column.get()); - auto& null_map = column_nullable->get_null_map_data(); - for (size_t i = 0; i != input_rows_count; ++i) { - if (null_map[i] == 1) { - vec_res[i] = 0; - } - } - } - - if (needles_nullable) { - auto column_nullable = check_and_get_column(needles_column.get()); - auto& null_map = column_nullable->get_null_map_data(); - for (size_t i = 0; i != input_rows_count; ++i) { - if (null_map[i] == 1) { - vec_res[i] = 0; - } - } - } + handle_nullable_column(haystack_column, vec_res, input_rows_count); + handle_nullable_column(needles_column, vec_res, input_rows_count); block.replace_by_position(result, std::move(col_res)); @@ -166,9 +137,25 @@ class FunctionsMultiStringSearch : public IFunction { } private: + using ResultType = typename Impl::ResultType; + const bool allow_hyperscan_ = true; const size_t max_hyperscan_regexp_length_ = 0; // not limited const size_t max_hyperscan_regexp_total_length_ = 0; // not limited + + /// Handles nullable column by setting result to 0 if the input is null + void handle_nullable_column(const ColumnPtr& column, PaddedPODArray& vec_res, + size_t input_rows_count) const { + if (column->is_nullable()) { + const auto* column_nullable = assert_cast(column.get()); + const auto& null_map = column_nullable->get_null_map_data(); + for (size_t i = 0; i != input_rows_count; ++i) { + if (null_map[i] == 1) { + vec_res[i] = 0; + } + } + } + } }; /// For more readable instantiations of MultiMatchAnyImpl<> @@ -187,17 +174,67 @@ struct FunctionMultiMatchAnyImpl { static auto get_return_type() { return std::make_shared>(); } + /** + * Prepares the regular expressions and scratch space for Hyperscan. + * + * This function takes a vector of needles (substrings to search for) and initializes + * the regular expressions and scratch space required for Hyperscan, a high-performance + * regular expression matching library. + * + */ + static Status prepare_regexps_and_scratch(const std::vector& needles, + multiregexps::Regexps*& regexps, + multiregexps::ScratchPtr& smart_scratch) { + multiregexps::DeferredConstructedRegexpsPtr deferred_constructed_regexps = + multiregexps::getOrSet(needles, std::nullopt); + regexps = deferred_constructed_regexps->get(); + + hs_scratch_t* scratch = nullptr; + hs_error_t err = hs_clone_scratch(regexps->getScratch(), &scratch); + + if (err != HS_SUCCESS) { + return Status::InternalError("could not clone scratch space for vectorscan"); + } + + smart_scratch.reset(scratch); + return Status::OK(); + } + + /** + * Static callback function to handle the match results of the hs_scan function. + * + * This function is called when a matching substring is found while scanning with + * Hyperscan. It updates the result based on the match information. + * + */ + static int on_match([[maybe_unused]] unsigned int id, unsigned long long /* from */, // NOLINT + unsigned long long /* to */, // NOLINT + unsigned int /* flags */, void* context) { + if constexpr (FindAnyIndex) { + *reinterpret_cast(context) = id; + } else if constexpr (FindAny) { + *reinterpret_cast(context) = 1; + } + /// Once we hit the callback, there is no need to search for others. 
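+        /// Returning a non-zero value from a Hyperscan match callback stops the scan early; hs_scan()
+        /// then reports HS_SCAN_TERMINATED, which the scanning code below accepts alongside HS_SUCCESS.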
+ return 1; + } + static Status vector_constant(const ColumnString::Chars& haystack_data, const ColumnString::Offsets& haystack_offsets, const Array& needles_arr, PaddedPODArray& res, PaddedPODArray& offsets, bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length) { - if (!allow_hyperscan) return Status::InvalidArgument("Hyperscan functions are disabled"); + if (!allow_hyperscan) { + return Status::InvalidArgument("Hyperscan functions are disabled"); + } std::vector needles; needles.reserve(needles_arr.size()); - for (const auto& needle : needles_arr) needles.emplace_back(needle.get()); + for (const auto& needle : needles_arr) { + needles.emplace_back(needle.get()); + } res.resize(haystack_offsets.size()); @@ -206,44 +243,26 @@ struct FunctionMultiMatchAnyImpl { return Status::OK(); } - multiregexps::DeferredConstructedRegexpsPtr deferred_constructed_regexps = - multiregexps::getOrSet( - needles, std::nullopt); - multiregexps::Regexps* regexps = deferred_constructed_regexps->get(); - - hs_scratch_t* scratch = nullptr; - hs_error_t err = hs_clone_scratch(regexps->getScratch(), &scratch); + multiregexps::Regexps* regexps = nullptr; + multiregexps::ScratchPtr smart_scratch; + RETURN_IF_ERROR(prepare_regexps_and_scratch(needles, regexps, smart_scratch)); - if (err != HS_SUCCESS) - return Status::InternalError("could not clone scratch space for vectorscan"); - - multiregexps::ScratchPtr smart_scratch(scratch); - - auto on_match = []([[maybe_unused]] unsigned int id, - unsigned long long /* from */, // NOLINT - unsigned long long /* to */, // NOLINT - unsigned int /* flags */, void* context) -> int { - if constexpr (FindAnyIndex) - *reinterpret_cast(context) = id; - else if constexpr (FindAny) - *reinterpret_cast(context) = 1; - /// Once we hit the callback, there is no need to search for others. - return 1; - }; const size_t haystack_offsets_size = haystack_offsets.size(); UInt64 offset = 0; for (size_t i = 0; i < haystack_offsets_size; ++i) { UInt64 length = haystack_offsets[i] - offset; /// vectorscan restriction. - if (length > std::numeric_limits::max()) + if (length > std::numeric_limits::max()) { return Status::InternalError("too long string to search"); + } /// zero the result, scan, check, update the offset. 
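+            /// res[i] must start at 0: on_match only writes a value when a needle matches, so if nothing
+            /// matches the callback never runs and zero remains the result for this row.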
res[i] = 0; - err = hs_scan(regexps->getDB(), - reinterpret_cast(haystack_data.data()) + offset, - static_cast(length), 0, smart_scratch.get(), on_match, &res[i]); - if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) + hs_error_t err = hs_scan( + regexps->getDB(), reinterpret_cast(haystack_data.data()) + offset, + static_cast(length), 0, smart_scratch.get(), on_match, &res[i]); + if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) { return Status::InternalError("failed to scan with vectorscan"); + } offset = haystack_offsets[i]; } @@ -257,20 +276,22 @@ struct FunctionMultiMatchAnyImpl { PaddedPODArray& res, PaddedPODArray& offsets, bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length) { - if (!allow_hyperscan) return Status::InvalidArgument("Hyperscan functions are disabled"); + if (!allow_hyperscan) { + return Status::InvalidArgument("Hyperscan functions are disabled"); + } res.resize(haystack_offsets.size()); size_t prev_haystack_offset = 0; size_t prev_needles_offset = 0; - auto& nested_column = + const auto& nested_column = vectorized::check_and_get_column(needles_data) ->get_nested_column(); - const ColumnString* needles_data_string = check_and_get_column(nested_column); + const auto* needles_data_string = check_and_get_column(nested_column); if (!needles_data_string) { - return Status::InvalidArgument("needles should be string"); + return Status::InvalidArgument("needles should be string column"); } std::vector needles; @@ -287,46 +308,27 @@ struct FunctionMultiMatchAnyImpl { continue; } - multiregexps::DeferredConstructedRegexpsPtr deferred_constructed_regexps = - multiregexps::getOrSet( - needles, std::nullopt); - multiregexps::Regexps* regexps = deferred_constructed_regexps->get(); - - hs_scratch_t* scratch = nullptr; - hs_error_t err = hs_clone_scratch(regexps->getScratch(), &scratch); - - if (err != HS_SUCCESS) - return Status::InternalError("could not clone scratch space for vectorscan"); - - multiregexps::ScratchPtr smart_scratch(scratch); - - auto on_match = []([[maybe_unused]] unsigned int id, - unsigned long long /* from */, // NOLINT - unsigned long long /* to */, // NOLINT - unsigned int /* flags */, void* context) -> int { - if constexpr (FindAnyIndex) - *reinterpret_cast(context) = id; - else if constexpr (FindAny) - *reinterpret_cast(context) = 1; - /// Once we hit the callback, there is no need to search for others. - return 1; - }; + multiregexps::Regexps* regexps = nullptr; + multiregexps::ScratchPtr smart_scratch; + RETURN_IF_ERROR(prepare_regexps_and_scratch(needles, regexps, smart_scratch)); const size_t cur_haystack_length = haystack_offsets[i] - prev_haystack_offset; /// vectorscan restriction. - if (cur_haystack_length > std::numeric_limits::max()) + if (cur_haystack_length > std::numeric_limits::max()) { return Status::InternalError("too long string to search"); + } /// zero the result, scan, check, update the offset. 
res[i] = 0; - err = hs_scan( + hs_error_t err = hs_scan( regexps->getDB(), reinterpret_cast(haystack_data.data()) + prev_haystack_offset, static_cast(cur_haystack_length), 0, smart_scratch.get(), on_match, &res[i]); - if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) + if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) { return Status::InternalError("failed to scan with vectorscan"); + } prev_haystack_offset = haystack_offsets[i]; prev_needles_offset = needles_offsets[i]; diff --git a/be/src/vec/functions/like.cpp b/be/src/vec/functions/like.cpp index 376e28c06903e2..0207089b666f4a 100644 --- a/be/src/vec/functions/like.cpp +++ b/be/src/vec/functions/like.cpp @@ -816,119 +816,121 @@ void verbose_log_match(const std::string& str, const std::string& pattern_name, } } +Status FunctionLike::construct_like_const_state(FunctionContext* context, const StringRef& pattern, + std::shared_ptr& state, + bool try_hyperscan) { + std::string pattern_str = pattern.to_string(); + state->search_state.pattern_str = pattern_str; + std::string search_string; + + if (!pattern_str.empty() && RE2::FullMatch(pattern_str, LIKE_ALLPASS_RE)) { + state->search_state.set_search_string(""); + state->function = constant_allpass_fn; + state->scalar_function = constant_allpass_fn_scalar; + } else if (pattern_str.empty() || RE2::FullMatch(pattern_str, LIKE_EQUALS_RE, &search_string)) { + if (VLOG_DEBUG_IS_ON) { + verbose_log_match(pattern_str, "LIKE_EQUALS_RE", LIKE_EQUALS_RE); + VLOG_DEBUG << "search_string : " << search_string << ", size: " << search_string.size(); + } + remove_escape_character(&search_string); + if (VLOG_DEBUG_IS_ON) { + VLOG_DEBUG << "search_string escape removed: " << search_string + << ", size: " << search_string.size(); + } + state->search_state.set_search_string(search_string); + state->function = constant_equals_fn; + state->scalar_function = constant_equals_fn_scalar; + } else if (RE2::FullMatch(pattern_str, LIKE_STARTS_WITH_RE, &search_string)) { + if (VLOG_DEBUG_IS_ON) { + verbose_log_match(pattern_str, "LIKE_STARTS_WITH_RE", LIKE_STARTS_WITH_RE); + VLOG_DEBUG << "search_string : " << search_string << ", size: " << search_string.size(); + } + remove_escape_character(&search_string); + if (VLOG_DEBUG_IS_ON) { + VLOG_DEBUG << "search_string escape removed: " << search_string + << ", size: " << search_string.size(); + } + state->search_state.set_search_string(search_string); + state->function = constant_starts_with_fn; + state->scalar_function = constant_starts_with_fn_scalar; + } else if (RE2::FullMatch(pattern_str, LIKE_ENDS_WITH_RE, &search_string)) { + if (VLOG_DEBUG_IS_ON) { + verbose_log_match(pattern_str, "LIKE_ENDS_WITH_RE", LIKE_ENDS_WITH_RE); + VLOG_DEBUG << "search_string : " << search_string << ", size: " << search_string.size(); + } + remove_escape_character(&search_string); + if (VLOG_DEBUG_IS_ON) { + VLOG_DEBUG << "search_string escape removed: " << search_string + << ", size: " << search_string.size(); + } + state->search_state.set_search_string(search_string); + state->function = constant_ends_with_fn; + state->scalar_function = constant_ends_with_fn_scalar; + } else if (RE2::FullMatch(pattern_str, LIKE_SUBSTRING_RE, &search_string)) { + if (VLOG_DEBUG_IS_ON) { + verbose_log_match(pattern_str, "LIKE_SUBSTRING_RE", LIKE_SUBSTRING_RE); + VLOG_DEBUG << "search_string : " << search_string << ", size: " << search_string.size(); + } + remove_escape_character(&search_string); + if (VLOG_DEBUG_IS_ON) { + VLOG_DEBUG << "search_string escape removed: " << search_string + << ", size: " << 
search_string.size(); + } + state->search_state.set_search_string(search_string); + state->function = constant_substring_fn; + state->scalar_function = constant_substring_fn_scalar; + } else { + std::string re_pattern; + convert_like_pattern(&state->search_state, pattern_str, &re_pattern); + if (VLOG_DEBUG_IS_ON) { + VLOG_DEBUG << "hyperscan, pattern str: " << pattern_str + << ", size: " << pattern_str.size() << ", re pattern: " << re_pattern + << ", size: " << re_pattern.size(); + } + + hs_database_t* database = nullptr; + hs_scratch_t* scratch = nullptr; + if (try_hyperscan && hs_prepare(context, re_pattern.c_str(), &database, &scratch).ok()) { + // use hyperscan + state->search_state.hs_database.reset(database); + state->search_state.hs_scratch.reset(scratch); + } else { + // fallback to re2 + // reset hs_database to nullptr to indicate not use hyperscan + state->search_state.hs_database.reset(); + state->search_state.hs_scratch.reset(); + + RE2::Options opts; + opts.set_never_nl(false); + opts.set_dot_nl(true); + state->search_state.regex = std::make_unique(re_pattern, opts); + if (!state->search_state.regex->ok()) { + return Status::InternalError("Invalid regex expression: {}(origin: {})", re_pattern, + pattern_str); + } + } + + state->function = constant_regex_fn; + state->scalar_function = constant_regex_fn_scalar; + } + return Status::OK(); +} + Status FunctionLike::open(FunctionContext* context, FunctionContext::FunctionStateScope scope) { if (scope != FunctionContext::THREAD_LOCAL) { return Status::OK(); } std::shared_ptr state = std::make_shared(); - context->set_function_state(scope, state); state->is_like_pattern = true; state->function = like_fn; state->scalar_function = like_fn_scalar; if (context->is_col_constant(1)) { const auto pattern_col = context->get_constant_col(1)->column_ptr; const auto& pattern = pattern_col->get_data_at(0); - - std::string pattern_str = pattern.to_string(); - state->search_state.pattern_str = pattern_str; - std::string search_string; - - if (!pattern_str.empty() && RE2::FullMatch(pattern_str, LIKE_ALLPASS_RE)) { - state->search_state.set_search_string(""); - state->function = constant_allpass_fn; - state->scalar_function = constant_allpass_fn_scalar; - } else if (pattern_str.empty() || - RE2::FullMatch(pattern_str, LIKE_EQUALS_RE, &search_string)) { - if (VLOG_DEBUG_IS_ON) { - verbose_log_match(pattern_str, "LIKE_EQUALS_RE", LIKE_EQUALS_RE); - VLOG_DEBUG << "search_string : " << search_string - << ", size: " << search_string.size(); - } - remove_escape_character(&search_string); - if (VLOG_DEBUG_IS_ON) { - VLOG_DEBUG << "search_string escape removed: " << search_string - << ", size: " << search_string.size(); - } - state->search_state.set_search_string(search_string); - state->function = constant_equals_fn; - state->scalar_function = constant_equals_fn_scalar; - } else if (RE2::FullMatch(pattern_str, LIKE_STARTS_WITH_RE, &search_string)) { - if (VLOG_DEBUG_IS_ON) { - verbose_log_match(pattern_str, "LIKE_STARTS_WITH_RE", LIKE_STARTS_WITH_RE); - VLOG_DEBUG << "search_string : " << search_string - << ", size: " << search_string.size(); - } - remove_escape_character(&search_string); - if (VLOG_DEBUG_IS_ON) { - VLOG_DEBUG << "search_string escape removed: " << search_string - << ", size: " << search_string.size(); - } - state->search_state.set_search_string(search_string); - state->function = constant_starts_with_fn; - state->scalar_function = constant_starts_with_fn_scalar; - } else if (RE2::FullMatch(pattern_str, LIKE_ENDS_WITH_RE, &search_string)) 
{ - if (VLOG_DEBUG_IS_ON) { - verbose_log_match(pattern_str, "LIKE_ENDS_WITH_RE", LIKE_ENDS_WITH_RE); - VLOG_DEBUG << "search_string : " << search_string - << ", size: " << search_string.size(); - } - remove_escape_character(&search_string); - if (VLOG_DEBUG_IS_ON) { - VLOG_DEBUG << "search_string escape removed: " << search_string - << ", size: " << search_string.size(); - } - state->search_state.set_search_string(search_string); - state->function = constant_ends_with_fn; - state->scalar_function = constant_ends_with_fn_scalar; - } else if (RE2::FullMatch(pattern_str, LIKE_SUBSTRING_RE, &search_string)) { - if (VLOG_DEBUG_IS_ON) { - verbose_log_match(pattern_str, "LIKE_SUBSTRING_RE", LIKE_SUBSTRING_RE); - VLOG_DEBUG << "search_string : " << search_string - << ", size: " << search_string.size(); - } - remove_escape_character(&search_string); - if (VLOG_DEBUG_IS_ON) { - VLOG_DEBUG << "search_string escape removed: " << search_string - << ", size: " << search_string.size(); - } - state->search_state.set_search_string(search_string); - state->function = constant_substring_fn; - state->scalar_function = constant_substring_fn_scalar; - } else { - std::string re_pattern; - convert_like_pattern(&state->search_state, pattern_str, &re_pattern); - if (VLOG_DEBUG_IS_ON) { - VLOG_DEBUG << "hyperscan, pattern str: " << pattern_str - << ", size: " << pattern_str.size() << ", re pattern: " << re_pattern - << ", size: " << re_pattern.size(); - } - - hs_database_t* database = nullptr; - hs_scratch_t* scratch = nullptr; - if (hs_prepare(context, re_pattern.c_str(), &database, &scratch).ok()) { - // use hyperscan - state->search_state.hs_database.reset(database); - state->search_state.hs_scratch.reset(scratch); - } else { - // fallback to re2 - // reset hs_database to nullptr to indicate not use hyperscan - state->search_state.hs_database.reset(); - state->search_state.hs_scratch.reset(); - - RE2::Options opts; - opts.set_never_nl(false); - opts.set_dot_nl(true); - state->search_state.regex = std::make_unique(re_pattern, opts); - if (!state->search_state.regex->ok()) { - return Status::InternalError("Invalid regex expression: {}(origin: {})", - re_pattern, pattern_str); - } - } - - state->function = constant_regex_fn; - state->scalar_function = constant_regex_fn_scalar; - } + RETURN_IF_ERROR(construct_like_const_state(context, pattern, state)); } + context->set_function_state(scope, state); + return Status::OK(); } diff --git a/be/src/vec/functions/like.h b/be/src/vec/functions/like.h index 1e9cb2e4fad4d7..d56c4c35389ce6 100644 --- a/be/src/vec/functions/like.h +++ b/be/src/vec/functions/like.h @@ -256,6 +256,10 @@ class FunctionLike : public FunctionLikeBase { Status open(FunctionContext* context, FunctionContext::FunctionStateScope scope) override; + static Status construct_like_const_state(FunctionContext* ctx, const StringRef& pattern, + std::shared_ptr& state, + bool try_hyperscan = true); + friend struct LikeSearchState; friend struct VectorAllpassSearchState; friend struct VectorEqualSearchState; diff --git a/be/src/vec/functions/match.cpp b/be/src/vec/functions/match.cpp index a9e1b6eba8e95e..e3909d766f2587 100644 --- a/be/src/vec/functions/match.cpp +++ b/be/src/vec/functions/match.cpp @@ -19,6 +19,7 @@ #include +#include "olap/rowset/segment_v2/inverted_index/analyzer/analyzer.h" #include "runtime/query_context.h" #include "runtime/runtime_state.h" #include "util/debug_points.h" @@ -174,21 +175,22 @@ inline doris::segment_v2::InvertedIndexQueryType FunctionMatchBase::get_query_ty return 
doris::segment_v2::InvertedIndexQueryType::UNKNOWN_QUERY; } -void FunctionMatchBase::analyse_query_str_token(std::vector* query_tokens, - InvertedIndexCtx* inverted_index_ctx, - const std::string& match_query_str, - const std::string& column_name) const { +std::vector FunctionMatchBase::analyse_query_str_token( + InvertedIndexCtx* inverted_index_ctx, const std::string& match_query_str, + const std::string& column_name) const { VLOG_DEBUG << "begin to run " << get_name() << ", parser_type: " << inverted_index_parser_type_to_string(inverted_index_ctx->parser_type); + std::vector query_tokens; if (inverted_index_ctx->parser_type == InvertedIndexParserType::PARSER_NONE) { - query_tokens->emplace_back(match_query_str); - return; + query_tokens.emplace_back(match_query_str); + return query_tokens; } - auto reader = doris::segment_v2::InvertedIndexReader::create_reader(inverted_index_ctx, - match_query_str); - doris::segment_v2::InvertedIndexReader::get_analyse_result( - *query_tokens, reader.get(), inverted_index_ctx->analyzer, column_name, - get_query_type_from_fn_name()); + auto reader = doris::segment_v2::inverted_index::InvertedIndexAnalyzer::create_reader( + inverted_index_ctx->char_filter_map); + reader->init(match_query_str.data(), match_query_str.size(), true); + query_tokens = doris::segment_v2::inverted_index::InvertedIndexAnalyzer::get_analyse_result( + reader.get(), inverted_index_ctx->analyzer, column_name, get_query_type_from_fn_name()); + return query_tokens; } inline std::vector FunctionMatchBase::analyse_data_token( @@ -205,14 +207,14 @@ inline std::vector FunctionMatchBase::analyse_data_token( data_tokens.emplace_back(str_ref.to_string()); continue; } - auto reader = doris::segment_v2::InvertedIndexReader::create_reader( - inverted_index_ctx, str_ref.to_string()); - - std::vector element_tokens; - - doris::segment_v2::InvertedIndexReader::get_analyse_result( - element_tokens, reader.get(), inverted_index_ctx->analyzer, column_name, - query_type, false); + auto reader = doris::segment_v2::inverted_index::InvertedIndexAnalyzer::create_reader( + inverted_index_ctx->char_filter_map); + reader->init(str_ref.data, str_ref.size, true); + + std::vector element_tokens = + doris::segment_v2::inverted_index::InvertedIndexAnalyzer::get_analyse_result( + reader.get(), inverted_index_ctx->analyzer, column_name, query_type, + false); data_tokens.insert(data_tokens.end(), element_tokens.begin(), element_tokens.end()); } } else { @@ -220,11 +222,13 @@ inline std::vector FunctionMatchBase::analyse_data_token( if (inverted_index_ctx->parser_type == InvertedIndexParserType::PARSER_NONE) { data_tokens.emplace_back(str_ref.to_string()); } else { - auto reader = doris::segment_v2::InvertedIndexReader::create_reader( - inverted_index_ctx, str_ref.to_string()); - doris::segment_v2::InvertedIndexReader::get_analyse_result( - data_tokens, reader.get(), inverted_index_ctx->analyzer, column_name, - query_type, false); + auto reader = doris::segment_v2::inverted_index::InvertedIndexAnalyzer::create_reader( + inverted_index_ctx->char_filter_map); + reader->init(str_ref.data, str_ref.size, true); + data_tokens = + doris::segment_v2::inverted_index::InvertedIndexAnalyzer::get_analyse_result( + reader.get(), inverted_index_ctx->analyzer, column_name, query_type, + false); } } return data_tokens; @@ -252,8 +256,8 @@ Status FunctionMatchAny::execute_match(FunctionContext* context, const std::stri ColumnUInt8::Container& result) const { RETURN_IF_ERROR(check(context, name)); - std::vector query_tokens; - 
analyse_query_str_token(&query_tokens, inverted_index_ctx, match_query_str, column_name); + std::vector query_tokens = + analyse_query_str_token(inverted_index_ctx, match_query_str, column_name); if (query_tokens.empty()) { VLOG_DEBUG << fmt::format( "token parser result is empty for query, " @@ -290,8 +294,8 @@ Status FunctionMatchAll::execute_match(FunctionContext* context, const std::stri ColumnUInt8::Container& result) const { RETURN_IF_ERROR(check(context, name)); - std::vector query_tokens; - analyse_query_str_token(&query_tokens, inverted_index_ctx, match_query_str, column_name); + std::vector query_tokens = + analyse_query_str_token(inverted_index_ctx, match_query_str, column_name); if (query_tokens.empty()) { VLOG_DEBUG << fmt::format( "token parser result is empty for query, " @@ -334,8 +338,8 @@ Status FunctionMatchPhrase::execute_match(FunctionContext* context, const std::s ColumnUInt8::Container& result) const { RETURN_IF_ERROR(check(context, name)); - std::vector query_tokens; - analyse_query_str_token(&query_tokens, inverted_index_ctx, match_query_str, column_name); + std::vector query_tokens = + analyse_query_str_token(inverted_index_ctx, match_query_str, column_name); if (query_tokens.empty()) { VLOG_DEBUG << fmt::format( "token parser result is empty for query, " @@ -393,8 +397,8 @@ Status FunctionMatchPhrasePrefix::execute_match( ColumnUInt8::Container& result) const { RETURN_IF_ERROR(check(context, name)); - std::vector query_tokens; - analyse_query_str_token(&query_tokens, inverted_index_ctx, match_query_str, column_name); + std::vector query_tokens = + analyse_query_str_token(inverted_index_ctx, match_query_str, column_name); if (query_tokens.empty()) { VLOG_DEBUG << fmt::format( "token parser result is empty for query, " diff --git a/be/src/vec/functions/match.h b/be/src/vec/functions/match.h index 3026e4a06cf7fd..85298d096b0e68 100644 --- a/be/src/vec/functions/match.h +++ b/be/src/vec/functions/match.h @@ -82,10 +82,9 @@ class FunctionMatchBase : public IFunction { doris::segment_v2::InvertedIndexQueryType get_query_type_from_fn_name() const; - void analyse_query_str_token(std::vector* query_tokens, - InvertedIndexCtx* inverted_index_ctx, - const std::string& match_query_str, - const std::string& field_name) const; + std::vector analyse_query_str_token(InvertedIndexCtx* inverted_index_ctx, + const std::string& match_query_str, + const std::string& field_name) const; std::vector analyse_data_token(const std::string& column_name, InvertedIndexCtx* inverted_index_ctx, diff --git a/be/src/vec/functions/math.cpp b/be/src/vec/functions/math.cpp index af2e68ec9822c8..2d9faaf19bc492 100644 --- a/be/src/vec/functions/math.cpp +++ b/be/src/vec/functions/math.cpp @@ -350,6 +350,79 @@ struct PowName { }; using FunctionPow = FunctionBinaryArithmetic; +class FunctionNormalCdf : public IFunction { +public: + static constexpr auto name = "normal_cdf"; + + String get_name() const override { return name; } + + static FunctionPtr create() { return std::make_shared(); } + + DataTypePtr get_return_type_impl(const DataTypes& arguments) const override { + return make_nullable(std::make_shared()); + } + + DataTypes get_variadic_argument_types_impl() const override { + return {std::make_shared(), std::make_shared(), + std::make_shared()}; + } + size_t get_number_of_arguments() const override { return 3; } + + Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, + size_t result, size_t input_rows_count) const override { + auto result_column = 
ColumnFloat64::create(input_rows_count); + auto result_null_map_column = ColumnUInt8::create(input_rows_count, 0); + + auto& result_data = result_column->get_data(); + NullMap& result_null_map = + assert_cast(result_null_map_column.get())->get_data(); + + ColumnPtr argument_columns[3]; + bool col_const[3]; + size_t argument_size = arguments.size(); + for (int i = 0; i < argument_size; ++i) { + argument_columns[i] = block.get_by_position(arguments[i]).column; + col_const[i] = is_column_const(*argument_columns[i]); + if (col_const[i]) { + argument_columns[i] = + static_cast(*argument_columns[i]).get_data_column_ptr(); + } + } + + auto* mean_col = assert_cast(argument_columns[0].get()); + auto* sd_col = assert_cast(argument_columns[1].get()); + auto* value_col = assert_cast(argument_columns[2].get()); + + result_column->reserve(input_rows_count); + for (size_t i = 0; i < input_rows_count; ++i) { + double mean = mean_col->get_element(index_check_const(i, col_const[0])); + double sd = sd_col->get_element(index_check_const(i, col_const[1])); + double v = value_col->get_element(index_check_const(i, col_const[2])); + + if (!check_argument(sd)) [[unlikely]] { + result_null_map[i] = true; + continue; + } + result_data[i] = calculate_cell(mean, sd, v); + } + + block.get_by_position(result).column = + ColumnNullable::create(std::move(result_column), std::move(result_null_map_column)); + return Status::OK(); + } + + static bool check_argument(double sd) { return sd > 0; } + static double calculate_cell(double mean, double sd, double v) { +#ifdef __APPLE__ + const double sqrt2 = std::sqrt(2); +#else + constexpr double sqrt2 = std::numbers::sqrt2; +#endif + + return 0.5 * (std::erf((v - mean) / (sd * sqrt2)) + 1); + } +}; + // TODO: Now math may cause one thread compile time too long, because the function in math // so mush. 
Split it to speed up compile time in the future void register_function_math(SimpleFunctionFactory& factory) { @@ -386,5 +459,6 @@ void register_function_math(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function(); factory.register_function(); + factory.register_function(); } } // namespace doris::vectorized diff --git a/be/src/vec/functions/simple_function_factory.h b/be/src/vec/functions/simple_function_factory.h index 33a3202c18e298..d8b544d5bfdabb 100644 --- a/be/src/vec/functions/simple_function_factory.h +++ b/be/src/vec/functions/simple_function_factory.h @@ -151,14 +151,6 @@ class SimpleFunctionFactory { function_alias[alias] = name; } - /// @TEMPORARY: for be_exec_version=4 - template - void register_alternative_function() { - static std::string suffix {"_old_for_version_before_5_0"}; - function_to_replace[Function::name] = Function::name + suffix; - register_function(Function::name + suffix, &createDefaultFunction); - } - FunctionBasePtr get_function(const std::string& name, const ColumnsWithTypeAndName& arguments, const DataTypePtr& return_type, int be_version = BeExecVersionManager::get_newest_version()) { diff --git a/be/src/vec/olap/olap_data_convertor.cpp b/be/src/vec/olap/olap_data_convertor.cpp index 8dcdd977d9a57f..c0b888898713e5 100644 --- a/be/src/vec/olap/olap_data_convertor.cpp +++ b/be/src/vec/olap/olap_data_convertor.cpp @@ -86,7 +86,17 @@ OlapBlockDataConvertor::OlapColumnDataConvertorBaseUPtr OlapBlockDataConvertor::create_array_convertor(const TabletColumn& column) { const auto& sub_column = column.get_sub_column(0); return std::make_unique( - create_olap_column_data_convertor(sub_column)); + create_olap_column_data_convertor(sub_column), sub_column); +} + +OlapBlockDataConvertor::OlapColumnDataConvertorBaseUPtr +OlapBlockDataConvertor::create_struct_convertor(const TabletColumn& column) { + std::vector sub_convertors; + for (uint32_t i = 0; i < column.get_subtype_count(); i++) { + const TabletColumn& sub_column = column.get_sub_column(i); + sub_convertors.emplace_back(create_olap_column_data_convertor(sub_column)); + } + return std::make_unique(sub_convertors); } OlapBlockDataConvertor::OlapColumnDataConvertorBaseUPtr @@ -104,6 +114,12 @@ OlapBlockDataConvertor::create_agg_state_convertor(const TabletColumn& column) { } else if (type == PrimitiveType::INVALID_TYPE) { // INVALID_TYPE means function's serialized type is fixed object return std::make_unique(); + } else if (type == PrimitiveType::TYPE_MAP) { + return create_map_convertor(column); + } else if (type == PrimitiveType::TYPE_STRUCT) { + return create_struct_convertor(column); + } else if (type == PrimitiveType::TYPE_ARRAY) { + return create_array_convertor(column); } else { throw Exception(ErrorCode::INTERNAL_ERROR, "OLAP_FIELD_TYPE_AGG_STATE meet unsupported type: {}", @@ -199,12 +215,7 @@ OlapBlockDataConvertor::create_olap_column_data_convertor(const TabletColumn& co return std::make_unique(); } case FieldType::OLAP_FIELD_TYPE_STRUCT: { - std::vector sub_convertors; - for (uint32_t i = 0; i < column.get_subtype_count(); i++) { - const TabletColumn& sub_column = column.get_sub_column(i); - sub_convertors.emplace_back(create_olap_column_data_convertor(sub_column)); - } - return std::make_unique(sub_convertors); + return create_struct_convertor(column); } case FieldType::OLAP_FIELD_TYPE_ARRAY: { return create_array_convertor(column); @@ -286,7 +297,7 @@ void OlapBlockDataConvertor::OlapColumnDataConvertorBase::set_source_column( _row_pos = row_pos; _num_rows = num_rows; 
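// --- Editorial note on FunctionNormalCdf (added in math.cpp above; sketch, not part of ---
// --- the patch) ---------------------------------------------------------------------------
// calculate_cell() evaluates the normal CDF Phi((v - mean) / sd) via the error function:
//     normal_cdf(mean, sd, v) = 0.5 * (1 + erf((v - mean) / (sd * sqrt(2)))),
// and the result row is NULL whenever sd <= 0 (check_argument). A self-contained numeric
// check of that formula, independent of any Doris types:
#include <cassert>
#include <cmath>

static double normal_cdf_ref(double mean, double sd, double v) {
    return 0.5 * (std::erf((v - mean) / (sd * std::sqrt(2.0))) + 1.0);
}

int main() {
    assert(std::fabs(normal_cdf_ref(0.0, 1.0, 0.0) - 0.5) < 1e-12);   // median of N(0,1) -> 0.5
    assert(std::fabs(normal_cdf_ref(0.0, 1.0, 1.96) - 0.975) < 1e-3); // ~97.5% at +1.96 sigma
    assert(std::fabs(normal_cdf_ref(10.0, 2.0, 10.0) - 0.5) < 1e-12); // shifted/scaled median
    return 0;
}
// ------------------------------------------------------------------------------------------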
if (_typed_column.column->is_nullable()) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); _nullmap = nullable_column->get_null_map_data().data(); } @@ -332,7 +343,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorBitMap::convert_to_olap() assert(_typed_column.column); const vectorized::ColumnBitmap* column_bitmap = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_bitmap = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -341,8 +352,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorBitMap::convert_to_olap() } assert(column_bitmap); - BitmapValue* bitmap_value = - const_cast(column_bitmap->get_data().data() + _row_pos); + auto* bitmap_value = const_cast(column_bitmap->get_data().data() + _row_pos); BitmapValue* bitmap_value_cur = bitmap_value; BitmapValue* bitmap_value_end = bitmap_value_cur + _num_rows; @@ -410,7 +420,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorQuantileState::convert_to_ const vectorized::ColumnQuantileState* column_quantile_state = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_quantile_state = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -420,7 +430,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorQuantileState::convert_to_ } assert(column_quantile_state); - QuantileState* quantile_state = + auto* quantile_state = const_cast(column_quantile_state->get_data().data() + _row_pos); QuantileState* quantile_state_cur = quantile_state; QuantileState* quantile_state_end = quantile_state_cur + _num_rows; @@ -488,7 +498,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorHLL::convert_to_olap() { assert(_typed_column.column); const vectorized::ColumnHLL* column_hll = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_hll = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -497,7 +507,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorHLL::convert_to_olap() { } assert(column_hll); - HyperLogLog* hll_value = const_cast(column_hll->get_data().data() + _row_pos); + auto* hll_value = const_cast(column_hll->get_data().data() + _row_pos); HyperLogLog* hll_value_cur = hll_value; HyperLogLog* hll_value_end = hll_value_cur + _num_rows; @@ -588,7 +598,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorChar::convert_to_olap() { assert(_typed_column.column); const vectorized::ColumnString* column_string = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_string = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -695,7 +705,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorVarChar::convert_to_olap() assert(_typed_column.column); const vectorized::ColumnString* column_string = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_string = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -731,7 +741,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorAggState::convert_to_olap( assert(_typed_column.column); const vectorized::ColumnFixedLengthObject* column_fixed_object = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); 
column_fixed_object = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -743,8 +753,8 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorAggState::convert_to_olap( assert(column_fixed_object); auto item_size = column_fixed_object->item_size(); - auto cur_values = (uint8_t*)(column_fixed_object->get_data().data()) + (item_size * _row_pos); - auto end_values = cur_values + (item_size * _num_rows); + auto* cur_values = (uint8_t*)(column_fixed_object->get_data().data()) + (item_size * _row_pos); + auto* end_values = cur_values + (item_size * _num_rows); Slice* slice = _slice.data(); if (_nullmap) { @@ -785,7 +795,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorDate::convert_to_olap() { assert(_typed_column.column); const vectorized::ColumnVector* column_datetime = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_datetime = assert_cast*>( nullable_column->get_nested_column_ptr().get()); @@ -834,7 +844,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorDateTime::convert_to_olap( assert(_typed_column.column); const vectorized::ColumnVector* column_datetime = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_datetime = assert_cast*>( nullable_column->get_nested_column_ptr().get()); @@ -877,7 +887,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorDecimal::convert_to_olap() assert(_typed_column.column); const vectorized::ColumnDecimal* column_decimal = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_decimal = assert_cast*>( nullable_column->get_nested_column_ptr().get()); @@ -939,7 +949,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorStruct::convert_to_olap() const vectorized::ColumnStruct* column_struct = nullptr; const vectorized::DataTypeStruct* data_type_struct = nullptr; if (_nullmap) { - auto nullable_column = + const auto* nullable_column = assert_cast(_typed_column.column.get()); column_struct = assert_cast( nullable_column->get_nested_column_ptr().get()); @@ -972,27 +982,21 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorStruct::convert_to_olap() Status OlapBlockDataConvertor::OlapColumnDataConvertorArray::convert_to_olap() { const ColumnArray* column_array = nullptr; - const DataTypeArray* data_type_array = nullptr; if (_nullmap) { const auto* nullable_column = assert_cast(_typed_column.column.get()); column_array = assert_cast(nullable_column->get_nested_column_ptr().get()); - data_type_array = assert_cast( - (assert_cast(_typed_column.type.get())->get_nested_type()) - .get()); } else { column_array = assert_cast(_typed_column.column.get()); - data_type_array = assert_cast(_typed_column.type.get()); } assert(column_array); - assert(data_type_array); - return convert_to_olap(column_array, data_type_array); + return convert_to_olap(column_array); } Status OlapBlockDataConvertor::OlapColumnDataConvertorArray::convert_to_olap( - const ColumnArray* column_array, const DataTypeArray* data_type_array) { + const ColumnArray* column_array) { ColumnPtr item_data = column_array->get_data_ptr(); auto start_offset = column_array->offset_at(_row_pos); @@ -1008,7 +1012,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorArray::convert_to_olap( _base_offset += elem_size; - ColumnWithTypeAndName item_typed_column = {item_data, data_type_array->get_nested_type(), + ColumnWithTypeAndName 
item_typed_column = {item_data, _data_type.get_nested_type(), "array.item"}; _item_convertor->set_source_column(item_typed_column, start_offset, elem_size); RETURN_IF_ERROR(_item_convertor->convert_to_olap()); diff --git a/be/src/vec/olap/olap_data_convertor.h b/be/src/vec/olap/olap_data_convertor.h index 500fc7dfc4ab4d..3473d9d26b5205 100644 --- a/be/src/vec/olap/olap_data_convertor.h +++ b/be/src/vec/olap/olap_data_convertor.h @@ -45,6 +45,7 @@ #include "vec/core/column_with_type_and_name.h" #include "vec/core/types.h" #include "vec/data_types/data_type.h" +#include "vec/data_types/data_type_array.h" #include "vec/data_types/data_type_factory.hpp" #include "vec/data_types/data_type_map.h" #include "vec/data_types/data_type_object.h" @@ -58,7 +59,6 @@ namespace vectorized { class Block; class ColumnArray; -class DataTypeArray; class ColumnMap; class DataTypeMap; template @@ -101,6 +101,7 @@ class OlapBlockDataConvertor { const TabletColumn& column); static OlapColumnDataConvertorBaseUPtr create_map_convertor(const TabletColumn& column); static OlapColumnDataConvertorBaseUPtr create_array_convertor(const TabletColumn& column); + static OlapColumnDataConvertorBaseUPtr create_struct_convertor(const TabletColumn& column); static OlapColumnDataConvertorBaseUPtr create_agg_state_convertor(const TabletColumn& column); // accessors for different data types; @@ -444,8 +445,10 @@ class OlapBlockDataConvertor { class OlapColumnDataConvertorArray : public OlapColumnDataConvertorBase { public: - OlapColumnDataConvertorArray(OlapColumnDataConvertorBaseUPtr item_convertor) - : _item_convertor(std::move(item_convertor)) { + OlapColumnDataConvertorArray(OlapColumnDataConvertorBaseUPtr item_convertor, + const TabletColumn& column) + : _item_convertor(std::move(item_convertor)), + _data_type(DataTypeFactory::instance().create_data_type(column)) { _base_offset = 0; _results.resize(4); // size + offset + item_data + item_nullmap } @@ -458,15 +461,13 @@ class OlapBlockDataConvertor { Status convert_to_olap() override; private: - // Status convert_to_olap(const UInt8* null_map, const ColumnArray* column_array, - // const DataTypeArray* data_type_array); - Status convert_to_olap(const ColumnArray* column_array, - const DataTypeArray* data_type_array); + Status convert_to_olap(const ColumnArray* column_array); OlapColumnDataConvertorBaseUPtr _item_convertor; UInt64 _base_offset; PaddedPODArray _offsets; // array offsets in disk layout // size + offsets_data + item_data + item_nullmap std::vector _results; + DataTypeArray _data_type; }; class OlapColumnDataConvertorMap : public OlapColumnDataConvertorBase { diff --git a/be/src/vec/runtime/vdata_stream_mgr.cpp b/be/src/vec/runtime/vdata_stream_mgr.cpp index a5db9a6150dcfa..c14d119e0fee25 100644 --- a/be/src/vec/runtime/vdata_stream_mgr.cpp +++ b/be/src/vec/runtime/vdata_stream_mgr.cpp @@ -124,7 +124,7 @@ Status VDataStreamMgr::transmit_block(const PTransmitDataParams* request, // // TODO: Rethink the lifecycle of DataStreamRecvr to distinguish // errors from receiver-initiated teardowns. - return Status::OK(); // local data stream receiver closed + return Status::EndOfFile("data stream receiver closed"); } // Lock the fragment context to ensure the runtime state and other objects are not @@ -134,7 +134,7 @@ Status VDataStreamMgr::transmit_block(const PTransmitDataParams* request, // Do not return internal error, because when query finished, the downstream node // may finish before upstream node. And the object maybe deconstructed. 
If return error // then the upstream node may report error status to FE, the query is failed. - return Status::OK(); // data stream receiver is deconstructed + return Status::EndOfFile("data stream receiver is deconstructed"); } bool eos = request->eos(); @@ -218,4 +218,4 @@ void VDataStreamMgr::cancel(const TUniqueId& fragment_instance_id, Status exec_s } } // namespace vectorized -} // namespace doris \ No newline at end of file +} // namespace doris diff --git a/be/src/vec/sink/vdata_stream_sender.cpp b/be/src/vec/sink/vdata_stream_sender.cpp index dd221c6aaa31c7..fb2f24ee0e1817 100644 --- a/be/src/vec/sink/vdata_stream_sender.cpp +++ b/be/src/vec/sink/vdata_stream_sender.cpp @@ -56,7 +56,7 @@ namespace doris::vectorized { template -Status Channel::init_stub(RuntimeState* state) { +Status Channel::init(RuntimeState* state) { if (_brpc_dest_addr.hostname.empty()) { LOG(WARNING) << "there is no brpc destination address's hostname" ", maybe version is not compatible."; @@ -66,12 +66,6 @@ Status Channel::init_stub(RuntimeState* state) { _is_local &= state->query_options().enable_local_exchange; } if (_is_local) { - auto st = _parent->state()->exec_env()->vstream_mgr()->find_recvr( - _fragment_instance_id, _dest_node_id, &_local_recvr); - if (!st.ok()) { - // Recvr not found. Maybe downstream task is finished already. - LOG(INFO) << "Recvr is not found : " << st.to_string(); - } return Status::OK(); } if (_brpc_dest_addr.hostname == BackendOptions::get_localhost()) { @@ -92,6 +86,14 @@ Status Channel::init_stub(RuntimeState* state) { template Status Channel::open(RuntimeState* state) { + if (_is_local) { + auto st = _parent->state()->exec_env()->vstream_mgr()->find_recvr( + _fragment_instance_id, _dest_node_id, &_local_recvr); + if (!st.ok()) { + // Recvr not found. Maybe downstream task is finished already. + LOG(INFO) << "Recvr is not found : " << st.to_string(); + } + } _be_number = state->be_number(); _brpc_request = std::make_shared(); // initialize brpc request diff --git a/be/src/vec/sink/vdata_stream_sender.h b/be/src/vec/sink/vdata_stream_sender.h index 0ceec97f1fc4de..7c86a62519a851 100644 --- a/be/src/vec/sink/vdata_stream_sender.h +++ b/be/src/vec/sink/vdata_stream_sender.h @@ -135,7 +135,7 @@ class Channel { // Initialize channel. // Returns OK if successful, error indication otherwise. - Status init_stub(RuntimeState* state); + Status init(RuntimeState* state); Status open(RuntimeState* state); // Asynchronously sends a row batch. 
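// --- Editorial note (not part of the patch) -----------------------------------------------
// With this change a closed or already-destroyed receiver is reported as
// Status::EndOfFile(...) instead of Status::OK(): VDataStreamMgr::transmit_block and
// Channel's _receiver_status now let the sender distinguish "receiver is gone, stop sending
// on this channel" from "block accepted", presumably without escalating it into a
// query-failing error (the comments about the downstream node finishing first still apply).
// Related cleanup in the same files: Channel::init_stub is renamed to init, and the
// local-receiver lookup (find_recvr) moves from init to open, so the receiver is resolved
// only when the channel is actually opened.
// ------------------------------------------------------------------------------------------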
@@ -188,7 +188,8 @@ class Channel { if (_local_recvr && !_local_recvr->is_closed()) { return true; } - _receiver_status = Status::OK(); // local data stream receiver closed + _receiver_status = Status::EndOfFile( + "local data stream receiver closed"); // local data stream receiver closed return false; } diff --git a/be/src/vec/sink/vtablet_block_convertor.cpp b/be/src/vec/sink/vtablet_block_convertor.cpp index 617668c035af45..36d9a034cf777a 100644 --- a/be/src/vec/sink/vtablet_block_convertor.cpp +++ b/be/src/vec/sink/vtablet_block_convertor.cpp @@ -211,7 +211,7 @@ Status OlapTableBlockConvertor::_internal_validate_column( auto string_column_checker = [&](const ColumnString* column_string) { size_t limit = config::string_type_length_soft_limit_bytes; // when type.len is negative, std::min will return overflow value, so we need to check it - if (type.len > 0) { + if (type.len >= 0) { limit = std::min(config::string_type_length_soft_limit_bytes, type.len); } diff --git a/be/src/vec/sink/writer/vtablet_writer.cpp b/be/src/vec/sink/writer/vtablet_writer.cpp index b9eaf79616fc2f..22be788a18b97a 100644 --- a/be/src/vec/sink/writer/vtablet_writer.cpp +++ b/be/src/vec/sink/writer/vtablet_writer.cpp @@ -64,6 +64,7 @@ #include "runtime/descriptors.h" #include "runtime/exec_env.h" #include "runtime/memory/memory_reclamation.h" +#include "runtime/query_context.h" #include "runtime/runtime_state.h" #include "runtime/thread_context.h" #include "service/backend_options.h" @@ -383,6 +384,16 @@ Status VNodeChannel::init(RuntimeState* state) { // a relatively large value to improve the import performance. _batch_size = std::max(_batch_size, 8192); + if (_state) { + QueryContext* query_ctx = _state->get_query_ctx(); + if (query_ctx) { + auto wg_ptr = query_ctx->workload_group(); + if (wg_ptr) { + _wg_id = wg_ptr->id(); + } + } + } + _inited = true; return Status::OK(); } @@ -426,6 +437,10 @@ void VNodeChannel::_open_internal(bool is_incremental) { request->set_txn_expiration(_parent->_txn_expiration); request->set_write_file_cache(_parent->_write_file_cache); + if (_wg_id > 0) { + request->set_workload_group_id(_wg_id); + } + auto open_callback = DummyBrpcCallback::create_shared(); auto open_closure = AutoReleaseClosure< PTabletWriterOpenRequest, @@ -1165,6 +1180,7 @@ Status VTabletWriter::_init(RuntimeState* state, RuntimeProfile* profile) { _schema.reset(new OlapTableSchemaParam()); RETURN_IF_ERROR(_schema->init(table_sink.schema)); _schema->set_timestamp_ms(state->timestamp_ms()); + _schema->set_nano_seconds(state->nano_seconds()); _schema->set_timezone(state->timezone()); _location = _pool->add(new OlapTableLocationParam(table_sink.location)); _nodes_info = _pool->add(new DorisNodesInfo(table_sink.nodes_info)); diff --git a/be/src/vec/sink/writer/vtablet_writer.h b/be/src/vec/sink/writer/vtablet_writer.h index e7a89824ba3f5a..52aa0f6b918057 100644 --- a/be/src/vec/sink/writer/vtablet_writer.h +++ b/be/src/vec/sink/writer/vtablet_writer.h @@ -413,6 +413,8 @@ class VNodeChannel { // send block to slave BE rely on this. dont reconstruct it. 
std::shared_ptr> _send_block_callback = nullptr; + int64_t _wg_id = -1; + bool _is_incremental; }; diff --git a/be/src/vec/sink/writer/vtablet_writer_v2.cpp b/be/src/vec/sink/writer/vtablet_writer_v2.cpp index 780ea8283bdf3d..aaffefbd894e4d 100644 --- a/be/src/vec/sink/writer/vtablet_writer_v2.cpp +++ b/be/src/vec/sink/writer/vtablet_writer_v2.cpp @@ -44,6 +44,7 @@ #include "util/debug_points.h" #include "util/defer_op.h" #include "util/doris_metrics.h" +#include "util/runtime_profile.h" #include "util/uid_util.h" #include "vec/core/block.h" #include "vec/sink/delta_writer_v2_pool.h" @@ -150,6 +151,7 @@ Status VTabletWriterV2::_init(RuntimeState* state, RuntimeProfile* profile) { _schema.reset(new OlapTableSchemaParam()); RETURN_IF_ERROR(_schema->init(table_sink.schema)); _schema->set_timestamp_ms(state->timestamp_ms()); + _schema->set_nano_seconds(state->nano_seconds()); _schema->set_timezone(state->timezone()); _location = _pool->add(new OlapTableLocationParam(table_sink.location)); _nodes_info = _pool->add(new DorisNodesInfo(table_sink.nodes_info)); @@ -226,11 +228,14 @@ Status VTabletWriterV2::_init(RuntimeState* state, RuntimeProfile* profile) { _input_rows_counter = ADD_COUNTER(_profile, "RowsRead", TUnit::UNIT); _output_rows_counter = ADD_COUNTER(_profile, "RowsProduced", TUnit::UNIT); _filtered_rows_counter = ADD_COUNTER(_profile, "RowsFiltered", TUnit::UNIT); - _send_data_timer = ADD_TIMER(_profile, "SendDataTime"); - _wait_mem_limit_timer = ADD_CHILD_TIMER(_profile, "WaitMemLimitTime", "SendDataTime"); - _row_distribution_timer = ADD_CHILD_TIMER(_profile, "RowDistributionTime", "SendDataTime"); - _write_memtable_timer = ADD_CHILD_TIMER(_profile, "WriteMemTableTime", "SendDataTime"); - _validate_data_timer = ADD_TIMER(_profile, "ValidateDataTime"); + _send_data_timer = ADD_TIMER_WITH_LEVEL(_profile, "SendDataTime", 1); + _wait_mem_limit_timer = + ADD_CHILD_TIMER_WITH_LEVEL(_profile, "WaitMemLimitTime", "SendDataTime", 1); + _row_distribution_timer = + ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RowDistributionTime", "SendDataTime", 1); + _write_memtable_timer = + ADD_CHILD_TIMER_WITH_LEVEL(_profile, "WriteMemTableTime", "SendDataTime", 1); + _validate_data_timer = ADD_TIMER_WITH_LEVEL(_profile, "ValidateDataTime", 1); _open_timer = ADD_TIMER(_profile, "OpenTime"); _close_timer = ADD_TIMER(_profile, "CloseWaitTime"); _close_writer_timer = ADD_CHILD_TIMER(_profile, "CloseWriterTime", "CloseWaitTime"); diff --git a/be/test/olap/delta_writer_cluster_key_test.cpp b/be/test/olap/delta_writer_cluster_key_test.cpp new file mode 100644 index 00000000000000..9c3e64109c4440 --- /dev/null +++ b/be/test/olap/delta_writer_cluster_key_test.cpp @@ -0,0 +1,365 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
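// Editorial summary (not part of the patch): this new test builds a UNIQUE_KEYS
// merge-on-write tablet with a sequence column (column index 4) and cluster key indexes
// {1, 0}, i.e. uniqueness is still defined on (k1, k2) while the segment data is ordered by
// (k2, k1). The key that is written twice is resolved in favor of the larger sequence
// value, and the assertions at the end of the test read the segment back to verify both the
// deduplication and that ordering.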
+ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "common/config.h" +#include "common/object_pool.h" +#include "exec/tablet_info.h" +#include "gen_cpp/Descriptors_types.h" +#include "gen_cpp/Types_types.h" +#include "gen_cpp/internal_service.pb.h" +#include "gtest/gtest_pred_impl.h" +#include "io/fs/local_file_system.h" +#include "olap/data_dir.h" +#include "olap/delta_writer.h" +#include "olap/iterators.h" +#include "olap/olap_define.h" +#include "olap/options.h" +#include "olap/rowset/beta_rowset.h" +#include "olap/rowset/segment_v2/segment.h" +#include "olap/rowset_builder.h" +#include "olap/schema.h" +#include "olap/short_key_index.h" +#include "olap/storage_engine.h" +#include "olap/tablet.h" +#include "olap/tablet_manager.h" +#include "olap/task/engine_publish_version_task.h" +#include "olap/txn_manager.h" +#include "runtime/decimalv2_value.h" +#include "runtime/define_primitive_type.h" +#include "runtime/descriptor_helper.h" +#include "runtime/descriptors.h" +#include "runtime/exec_env.h" +#include "vec/columns/column.h" +#include "vec/core/block.h" +#include "vec/core/column_with_type_and_name.h" +#include "vec/runtime/vdatetime_value.h" + +namespace doris { +class OlapMeta; + +// This is DeltaWriter unit test which used by streaming load. +// And also it should take schema change into account after streaming load. + +static const uint32_t MAX_PATH_LEN = 1024; +static StorageEngine* engine_ref = nullptr; + +static void set_up() { + char buffer[MAX_PATH_LEN]; + EXPECT_NE(getcwd(buffer, MAX_PATH_LEN), nullptr); + config::storage_root_path = std::string(buffer) + "/data_test"; + auto st = io::global_local_filesystem()->delete_directory(config::storage_root_path); + ASSERT_TRUE(st.ok()) << st; + st = io::global_local_filesystem()->create_directory(config::storage_root_path); + ASSERT_TRUE(st.ok()) << st; + std::vector paths; + paths.emplace_back(config::storage_root_path, -1); + + doris::EngineOptions options; + options.store_paths = paths; + auto engine = std::make_unique(options); + engine_ref = engine.get(); + Status s = engine->open(); + ASSERT_TRUE(s.ok()) << s; + ASSERT_TRUE(s.ok()) << s; + + ExecEnv* exec_env = doris::ExecEnv::GetInstance(); + exec_env->set_memtable_memory_limiter(new MemTableMemoryLimiter()); + exec_env->set_storage_engine(std::move(engine)); +} + +static void tear_down() { + ExecEnv* exec_env = doris::ExecEnv::GetInstance(); + exec_env->set_memtable_memory_limiter(nullptr); + engine_ref = nullptr; + exec_env->set_storage_engine(nullptr); + EXPECT_EQ(system("rm -rf ./data_test"), 0); + static_cast(io::global_local_filesystem()->delete_directory( + std::string(getenv("DORIS_HOME")) + "/" + UNUSED_PREFIX)); +} + +static void create_tablet_request_with_sequence_col(int64_t tablet_id, int32_t schema_hash, + TCreateTabletReq* request, + bool enable_mow = false) { + request->tablet_id = tablet_id; + request->__set_version(1); + request->partition_id = 30004; + request->tablet_schema.schema_hash = schema_hash; + request->tablet_schema.short_key_column_count = 2; + request->tablet_schema.keys_type = TKeysType::UNIQUE_KEYS; + request->tablet_schema.storage_type = TStorageType::COLUMN; + request->tablet_schema.__set_sequence_col_idx(4); + request->__set_storage_format(TStorageFormat::V2); + request->__set_enable_unique_key_merge_on_write(enable_mow); + request->tablet_schema.cluster_key_idxes.push_back(1); + request->tablet_schema.cluster_key_idxes.push_back(0); + + TColumn k1; + k1.column_name = "k1"; + 
k1.__set_is_key(true); + k1.__set_col_unique_id(0); + k1.column_type.type = TPrimitiveType::TINYINT; + request->tablet_schema.columns.push_back(k1); + + TColumn k2; + k2.column_name = "k2"; + k2.__set_is_key(true); + k1.__set_col_unique_id(1); + k2.column_type.type = TPrimitiveType::SMALLINT; + request->tablet_schema.columns.push_back(k2); + + TColumn v1; + v1.column_name = "v1"; + v1.__set_is_key(false); + k1.__set_col_unique_id(2); + v1.column_type.type = TPrimitiveType::DATETIME; + v1.__set_aggregation_type(TAggregationType::REPLACE); + request->tablet_schema.columns.push_back(v1); + + TColumn v2; + v2.column_name = "v2"; + v2.__set_is_key(false); + k1.__set_col_unique_id(3); + v2.column_type.type = TPrimitiveType::DATEV2; + v2.__set_aggregation_type(TAggregationType::REPLACE); + request->tablet_schema.columns.push_back(v2); + + TColumn sequence_col; + sequence_col.column_name = SEQUENCE_COL; + sequence_col.__set_is_key(false); + k1.__set_col_unique_id(4); + sequence_col.column_type.type = TPrimitiveType::INT; + sequence_col.__set_aggregation_type(TAggregationType::REPLACE); + request->tablet_schema.columns.push_back(sequence_col); +} + +static TDescriptorTable create_descriptor_tablet_with_sequence_col() { + TDescriptorTableBuilder dtb; + TTupleDescriptorBuilder tuple_builder; + + tuple_builder.add_slot( + TSlotDescriptorBuilder().type(TYPE_TINYINT).column_name("k1").column_pos(0).build()); + tuple_builder.add_slot( + TSlotDescriptorBuilder().type(TYPE_SMALLINT).column_name("k2").column_pos(1).build()); + tuple_builder.add_slot(TSlotDescriptorBuilder() + .type(TYPE_DATETIME) + .column_name("v1") + .column_pos(2) + .nullable(false) + .build()); + tuple_builder.add_slot(TSlotDescriptorBuilder() + .type(TYPE_DATEV2) + .column_name("v2") + .column_pos(3) + .nullable(false) + .build()); + tuple_builder.add_slot(TSlotDescriptorBuilder() + .type(TYPE_INT) + .column_name(SEQUENCE_COL) + .column_pos(4) + .nullable(false) + .build()); + tuple_builder.build(&dtb); + + return dtb.desc_tbl(); +} + +static void generate_data(vectorized::Block* block, int8_t k1, int16_t k2, int32_t seq) { + auto columns = block->mutate_columns(); + int8_t c1 = k1; + columns[0]->insert_data((const char*)&c1, sizeof(c1)); + + int16_t c2 = k2; + columns[1]->insert_data((const char*)&c2, sizeof(c2)); + + VecDateTimeValue c3; + c3.from_date_str("2020-07-16 19:39:43", 19); + int64_t c3_int = c3.to_int64(); + columns[2]->insert_data((const char*)&c3_int, sizeof(c3)); + + DateV2Value c4; + c4.unchecked_set_time(2022, 6, 6, 0, 0, 0, 0); + uint32_t c4_int = c4.to_date_int_val(); + columns[3]->insert_data((const char*)&c4_int, sizeof(c4)); + + int32_t c5 = seq; + columns[4]->insert_data((const char*)&c5, sizeof(c2)); +} + +class TestDeltaWriterClusterKey : public ::testing::Test { +public: + TestDeltaWriterClusterKey() {} + ~TestDeltaWriterClusterKey() {} + static void SetUpTestSuite() { + config::min_file_descriptor_number = 100; + set_up(); + } + + static void TearDownTestSuite() { tear_down(); } +}; + +TEST_F(TestDeltaWriterClusterKey, vec_sequence_col) { + std::unique_ptr profile = std::make_unique("CreateTablet"); + TCreateTabletReq request; + // sleep(20); + create_tablet_request_with_sequence_col(10005, 270068377, &request, true); + Status res = engine_ref->create_tablet(request, profile.get()); + ASSERT_TRUE(res.ok()); + + TDescriptorTable tdesc_tbl = create_descriptor_tablet_with_sequence_col(); + ObjectPool obj_pool; + DescriptorTbl* desc_tbl = nullptr; + static_cast(DescriptorTbl::create(&obj_pool, tdesc_tbl, 
&desc_tbl)); + TupleDescriptor* tuple_desc = desc_tbl->get_tuple_descriptor(0); + auto param = std::make_shared(); + + PUniqueId load_id; + load_id.set_hi(0); + load_id.set_lo(0); + WriteRequest write_req; + write_req.tablet_id = 10005; + write_req.schema_hash = 270068377; + write_req.txn_id = 20003; + write_req.partition_id = 30003; + write_req.load_id = load_id; + write_req.tuple_desc = tuple_desc; + write_req.slots = &(tuple_desc->slots()); + write_req.is_high_priority = false; + write_req.table_schema_param = param; + profile = std::make_unique("LoadChannels"); + auto delta_writer = + std::make_unique(*engine_ref, write_req, profile.get(), TUniqueId {}); + + vectorized::Block block; + for (const auto& slot_desc : tuple_desc->slots()) { + block.insert(vectorized::ColumnWithTypeAndName(slot_desc->get_empty_mutable_column(), + slot_desc->get_data_type_ptr(), + slot_desc->col_name())); + } + + auto rows = 4; + generate_data(&block, 123, 456, 100); + res = delta_writer->write(&block, {0}); + ASSERT_TRUE(res.ok()); + generate_data(&block, 123, 457, 100); + res = delta_writer->write(&block, {1}); + ASSERT_TRUE(res.ok()); + generate_data(&block, 123, 455, 90); + res = delta_writer->write(&block, {2}); + ASSERT_TRUE(res.ok()); + generate_data(&block, 123, 457, 90); // row 1 has larger sequence number + res = delta_writer->write(&block, {3}); + ASSERT_TRUE(res.ok()); + generate_data(&block, 122, 456, 90); + res = delta_writer->write(&block, {4}); + ASSERT_TRUE(res.ok()); + + res = delta_writer->close(); + ASSERT_TRUE(res.ok()); + res = delta_writer->wait_flush(); + ASSERT_TRUE(res.ok()); + res = delta_writer->build_rowset(); + ASSERT_TRUE(res.ok()); + res = delta_writer->submit_calc_delete_bitmap_task(); + ASSERT_TRUE(res.ok()); + res = delta_writer->wait_calc_delete_bitmap(); + ASSERT_TRUE(res.ok()); + res = delta_writer->commit_txn(PSlaveTabletNodes()); + ASSERT_TRUE(res.ok()); + + // publish version success + TabletSharedPtr tablet = engine_ref->tablet_manager()->get_tablet(write_req.tablet_id); + std::cout << "before publish, tablet row nums:" << tablet->num_rows() << std::endl; + OlapMeta* meta = tablet->data_dir()->get_meta(); + Version version; + version.first = tablet->get_rowset_with_max_version()->end_version() + 1; + version.second = tablet->get_rowset_with_max_version()->end_version() + 1; + std::cout << "start to add rowset version:" << version.first << "-" << version.second + << std::endl; + std::map tablet_related_rs; + engine_ref->txn_manager()->get_txn_related_tablets(write_req.txn_id, write_req.partition_id, + &tablet_related_rs); + ASSERT_EQ(1, tablet_related_rs.size()); + + std::cout << "start to publish txn" << std::endl; + RowsetSharedPtr rowset = tablet_related_rs.begin()->second; + TabletPublishStatistics pstats; + res = engine_ref->txn_manager()->publish_txn( + meta, write_req.partition_id, write_req.txn_id, write_req.tablet_id, + tablet_related_rs.begin()->first.tablet_uid, version, &pstats); + ASSERT_TRUE(res.ok()); + std::cout << "start to add inc rowset:" << rowset->rowset_id() + << ", num rows:" << rowset->num_rows() << ", version:" << rowset->version().first + << "-" << rowset->version().second << std::endl; + res = tablet->add_inc_rowset(rowset); + ASSERT_TRUE(res.ok()); + ASSERT_EQ(rows, tablet->num_rows()); + std::vector segments; + res = ((BetaRowset*)rowset.get())->load_segments(&segments); + ASSERT_TRUE(res.ok()); + ASSERT_EQ(1, rowset->num_segments()); + ASSERT_EQ(1, segments.size()); + + // read data, verify the data correct + OlapReaderStatistics stats; + 
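// Editorial note (not part of the patch): of the five rows written above, the key
// (k1=123, k2=457) appears twice and the sequence column keeps the seq=100 version, so
// four rows remain. With cluster keys (k2, k1) the segment iterator should return them as
// (123,455), (122,456), (123,456), (123,457), which is exactly what the get_int()
// assertions below check.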
StorageReadOptions opts; + opts.stats = &stats; + opts.tablet_schema = rowset->tablet_schema(); + + std::unique_ptr iter; + std::shared_ptr schema = std::make_shared(rowset->tablet_schema()); + auto s = segments[0]->new_iterator(schema, opts, &iter); + ASSERT_TRUE(s.ok()); + auto read_block = rowset->tablet_schema()->create_block(); + res = iter->next_batch(&read_block); + ASSERT_TRUE(res.ok()) << res; + ASSERT_EQ(rows, read_block.rows()); + + // get the value from k2 column + ASSERT_EQ(455, read_block.get_by_position(1).column->get_int(0)); + ASSERT_EQ(456, read_block.get_by_position(1).column->get_int(1)); + ASSERT_EQ(456, read_block.get_by_position(1).column->get_int(2)); + ASSERT_EQ(457, read_block.get_by_position(1).column->get_int(3)); + // get the value from k1 column + ASSERT_EQ(122, read_block.get_by_position(0).column->get_int(1)); + ASSERT_EQ(123, read_block.get_by_position(0).column->get_int(2)); + // get the value from sequence column + ASSERT_EQ(100, read_block.get_by_position(4).column->get_int(3)); + + // check primary key index + const PrimaryKeyIndexReader* pk_index_reader = segments[0]->get_primary_key_index(); + ASSERT_TRUE(pk_index_reader != nullptr); + // check short key index + const ShortKeyIndexDecoder* sk_index_decoder = segments[0]->get_short_key_index(); + // TODO see Segment::_load_index_impl, short key index is not loaded currently + ASSERT_TRUE(sk_index_decoder == nullptr); + + res = engine_ref->tablet_manager()->drop_tablet(request.tablet_id, request.replica_id, false); + ASSERT_TRUE(res.ok()); +} +} // namespace doris diff --git a/be/test/olap/lru_cache_test.cpp b/be/test/olap/lru_cache_test.cpp index 9adb30b93054f4..1acc38f2b9e084 100644 --- a/be/test/olap/lru_cache_test.cpp +++ b/be/test/olap/lru_cache_test.cpp @@ -88,18 +88,18 @@ class CacheTest : public testing::Test { void* value; }; - class CacheTestSizePolicy : public LRUCachePolicyTrackingManual { + class CacheTestSizePolicy : public LRUCachePolicy { public: CacheTestSizePolicy(size_t capacity) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::FOR_UT_CACHE_SIZE, capacity, - LRUCacheType::SIZE, -1) {} + : LRUCachePolicy(CachePolicy::CacheType::FOR_UT_CACHE_SIZE, capacity, + LRUCacheType::SIZE, -1) {} }; - class CacheTestNumberPolicy : public LRUCachePolicyTrackingManual { + class CacheTestNumberPolicy : public LRUCachePolicy { public: CacheTestNumberPolicy(size_t capacity, uint32_t num_shards) - : LRUCachePolicyTrackingManual(CachePolicy::CacheType::FOR_UT_CACHE_NUMBER, - capacity, LRUCacheType::NUMBER, -1, num_shards) {} + : LRUCachePolicy(CachePolicy::CacheType::FOR_UT_CACHE_NUMBER, capacity, + LRUCacheType::NUMBER, -1, num_shards) {} }; // there is 16 shards in ShardedLRUCache diff --git a/be/test/runtime/memory/thread_mem_tracker_mgr_test.cpp b/be/test/runtime/memory/thread_mem_tracker_mgr_test.cpp index d4624273b0b854..fad2116fca7630 100644 --- a/be/test/runtime/memory/thread_mem_tracker_mgr_test.cpp +++ b/be/test/runtime/memory/thread_mem_tracker_mgr_test.cpp @@ -167,8 +167,8 @@ TEST_F(ThreadMemTrackerMgrTest, MultiMemTracker) { std::unique_ptr thread_context = std::make_unique(); std::shared_ptr t1 = MemTrackerLimiter::create_shared(MemTrackerLimiter::Type::OTHER, "UT-MultiMemTracker1"); - std::shared_ptr t2 = std::make_shared("UT-MultiMemTracker2", t1.get()); - std::shared_ptr t3 = std::make_shared("UT-MultiMemTracker3", t1.get()); + std::shared_ptr t2 = std::make_shared("UT-MultiMemTracker2"); + std::shared_ptr t3 = std::make_shared("UT-MultiMemTracker3"); int64_t size1 = 4 * 1024; 
int64_t size2 = 4 * 1024 * 1024; diff --git a/be/test/util/algo_util_test.cpp b/be/test/util/algo_util_test.cpp new file mode 100644 index 00000000000000..cdf3316f1c94a8 --- /dev/null +++ b/be/test/util/algo_util_test.cpp @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include + +#include +#include + +#include "gtest/gtest_pred_impl.h" +#include "util/algorithm_util.h" + +namespace doris { + +TEST(TestAlgoUtil, DescentByStep) { + // double descent_by_step(int step_num, int64_t low_bound, int64_t high_bound, int64_t current) + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 101), 0.9); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 99), 1); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 200, 100, 101), 1); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 111), 0.8); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 188), 0.1); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 100), 1); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 200), 0); + EXPECT_EQ(AlgoUtil::descent_by_step(10, 100, 200, 300), 0); + + EXPECT_EQ(AlgoUtil::descent_by_step(4, 100, 200, 133), 0.5); + EXPECT_EQ(AlgoUtil::descent_by_step(4, 100, 200, 110), 0.75); + EXPECT_EQ(AlgoUtil::descent_by_step(4, 100, 200, 125), 0.75); + EXPECT_EQ(AlgoUtil::descent_by_step(4, 100, 200, 126), 0.5); +} + +} // namespace doris diff --git a/be/test/vec/columns/column_nullable_seriazlization_test.cpp b/be/test/vec/columns/column_nullable_seriazlization_test.cpp index 635916328ba916..70702369cd31af 100644 --- a/be/test/vec/columns/column_nullable_seriazlization_test.cpp +++ b/be/test/vec/columns/column_nullable_seriazlization_test.cpp @@ -20,92 +20,19 @@ #include #include -#include +#include "column_nullable_test.h" #include "vec/columns/column.h" #include "vec/columns/column_nullable.h" #include "vec/columns/column_string.h" #include "vec/columns/columns_number.h" #include "vec/common/arena.h" #include "vec/common/string_ref.h" -#include "vec/core/field.h" #include "vec/core/types.h" -#include "vec/data_types/data_type.h" -#include "vec/data_types/data_type_date_time.h" -#include "vec/data_types/data_type_string.h" using namespace doris; using namespace doris::vectorized; -static std::string generate_random_string(size_t max_length) { - std::srand(std::time(nullptr)); // use current time as seed for random generator - - if (max_length == 0) { - return ""; - } - - auto randbyte = []() -> char { - // generate a random byte, in range [0x00, 0xFF] - return static_cast(rand() % 256); - }; - - std::string str(max_length, 0); - std::generate_n(str.begin(), max_length, randbyte); - - return str; -} - -static MutableColumnPtr create_null_map(size_t input_rows_count, bool all_null = false, - bool all_not_null = false) { - std::srand(std::time(nullptr)); // 
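The AlgoUtil::descent_by_step expectations above are consistent with a simple staircase: the factor starts at 1 at the low bound and descends to 0 at the high bound in step_num equal steps, with out-of-range or inverted-bound inputs clamped to 1 or 0. A sketch that reproduces those expected values (an illustration derived from the test table, not necessarily the actual util implementation):

#include <cmath>
#include <cstdint>

// Staircase descent consistent with the EXPECT_EQ table in algo_util_test.cpp.
double descent_by_step_sketch(int step_num, int64_t low_bound, int64_t high_bound,
                              int64_t current) {
    if (low_bound >= high_bound || current <= low_bound) {
        return 1.0; // not yet inside a valid descending range
    }
    if (current >= high_bound) {
        return 0.0; // fully descended
    }
    double step_size = static_cast<double>(high_bound - low_bound) / step_num;
    int steps_used = static_cast<int>(std::ceil((current - low_bound) / step_size));
    return 1.0 - static_cast<double>(steps_used) / step_num;
}
// e.g. descent_by_step_sketch(10, 100, 200, 111) yields 0.8 and (4, 100, 200, 125)
// yields 0.75, which lines up with the expectations in the test above.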
use current time as seed for random generator - auto null_map = ColumnUInt8::create(); - for (size_t i = 0; i < input_rows_count; ++i) { - if (all_null) { - null_map->insert(1); - } else if (all_not_null) { - null_map->insert(0); - } else { - null_map->insert(rand() % 2); - } - } - return null_map; -} - -template -static MutableColumnPtr create_nested_column(size_t input_rows_count) { - MutableColumnPtr column; - if constexpr (std::is_integral_v) { - column = ColumnVector::create(); - } else if constexpr (std::is_same_v) { - column = ColumnString::create(); - } else if constexpr (std::is_same_v) { - column = ColumnDecimal64::create(0, 6); - } - - for (size_t i = 0; i < input_rows_count; ++i) { - if constexpr (std::is_integral_v) { - column->insert(rand() % std::numeric_limits::max()); - } else if constexpr (std::is_same_v) { - column->insert(generate_random_string(rand() % 512)); - } else if constexpr (std::is_same_v) { - column->insert(Int64(rand() % std::numeric_limits::max())); - } else { - throw std::runtime_error("Unsupported type"); - } - } - - return column; -} - -template -static ColumnNullable::MutablePtr create_column_nullable(size_t input_rows_count, - bool all_null = false, - bool all_not_null = false) { - auto null_map = create_null_map(input_rows_count, all_null, all_not_null); - auto nested_column = create_nested_column(input_rows_count); - return ColumnNullable::create(std::move(nested_column), std::move(null_map)); -} - TEST(ColumnNullableSerializationTest, column_nullable_column_vector) { const size_t input_rows_count = 4096 * 1000; ColumnNullable::Ptr column_nullable = create_column_nullable(input_rows_count); diff --git a/be/test/vec/columns/column_nullable_test.cpp b/be/test/vec/columns/column_nullable_test.cpp new file mode 100644 index 00000000000000..9e66d5bb302f38 --- /dev/null +++ b/be/test/vec/columns/column_nullable_test.cpp @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
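The ColumnNullable tests that follow revolve around one invariant: has_null() must stay in sync with the null map across every insert/clear path, including bulk paths such as filter_by_selector. A reduced sketch of the cached-flag bookkeeping being exercised (hypothetical stand-in; the real logic lives inside ColumnNullable):

#include <cstdint>
#include <vector>

// Minimal stand-in: a nullable column that caches whether any null has been inserted.
class ToyNullableColumn {
public:
    void insert(int64_t v) {
        _data.push_back(v);
        _null_map.push_back(0);
    }
    void insert_null() {
        _data.push_back(0);
        _null_map.push_back(1);
        _has_null = true;
    }
    void clear() {
        _data.clear();
        _null_map.clear();
        _has_null = false; // flag must be reset together with the null map
    }
    // Any path that copies null-map bytes in bulk must also refresh the flag;
    // that is what the filter_by_selector assertion in the test below checks.
    void append_null_map_bulk(const std::vector<uint8_t>& bytes) {
        for (uint8_t b : bytes) {
            _data.push_back(0);
            _null_map.push_back(b);
            if (b != 0) _has_null = true;
        }
    }
    bool has_null() const { return _has_null; }

private:
    std::vector<int64_t> _data;
    std::vector<uint8_t> _null_map;
    bool _has_null = false;
};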
+ +#include "vec/columns/column_nullable.h" + +#include +#include +#include + +#include "column_nullable_test.h" +#include "common/status.h" +#include "runtime/define_primitive_type.h" +#include "vec/columns/columns_number.h" +#include "vec/columns/predicate_column.h" +#include "vec/core/field.h" +#include "vec/core/types.h" +#include "vec/data_types/data_type.h" + +namespace doris::vectorized { + +TEST(ColumnNullableTest, NullTest) { + ColumnNullable::MutablePtr null_col = create_column_nullable(500, true); + EXPECT_TRUE(null_col->has_null()); + + ColumnNullable::MutablePtr dst_col = + ColumnNullable::create(create_nested_column(10), ColumnUInt8::create(10)); + EXPECT_FALSE(dst_col->has_null()); + + ColumnInt64::MutablePtr source_col = ColumnInt64::create(); + source_col->insert_range_of_integer(0, 100); + + dst_col->insert(Field()); + EXPECT_TRUE(dst_col->has_null()); + dst_col->clear(); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_many_from_not_nullable(*source_col, 0, 10); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_from_not_nullable(*source_col, 5); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_many_from_not_nullable(*source_col, 5, 5); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_range_from_not_nullable(*source_col, 5, 5); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_range_from( + *ColumnNullable::create(std::move(source_col), ColumnUInt8::create(10)), 5, 5); + EXPECT_FALSE(dst_col->has_null()); + + dst_col->clear(); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_null_elements(10); + EXPECT_TRUE(dst_col->has_null()); + + dst_col->clear(); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_from(*null_col, 100); + EXPECT_TRUE(dst_col->has_null()); + + auto tmp_col = + ColumnNullable::create(create_nested_column(10), ColumnUInt8::create(10, 1)); + + dst_col->clear(); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_from(*tmp_col, 9); + EXPECT_TRUE(dst_col->has_null()); + + dst_col->clear(); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_range_from(*tmp_col, 0, 3); + EXPECT_TRUE(dst_col->has_null()); + + dst_col->clear(); + EXPECT_FALSE(dst_col->has_null()); + dst_col->insert_from(*tmp_col, 9); + EXPECT_TRUE(dst_col->has_null()); +} + +TEST(ColumnNullableTest, PredicateTest) { + auto nullable_pred = + ColumnNullable::create(PredicateColumnType::create(), ColumnUInt8::create()); + nullable_pred->insert_many_defaults(3); + EXPECT_TRUE(nullable_pred->has_null()); + nullable_pred->insert_null_elements(10); + EXPECT_TRUE(nullable_pred->has_null()); + + nullable_pred->clear(); + EXPECT_FALSE(nullable_pred->has_null()); + nullable_pred->insert_null_elements(10); + EXPECT_TRUE(nullable_pred->has_null()); // now it have 10 nulls + + auto null_dst = ColumnNullable::create(ColumnDate::create(), ColumnUInt8::create()); + EXPECT_FALSE(null_dst->has_null()); + + uint16_t selector[] = {5, 8}; // both null + EXPECT_EQ(nullable_pred->filter_by_selector(selector, 2, null_dst.get()), Status::OK()); + // filter_by_selector must announce to update has_null to make below right. + EXPECT_TRUE(null_dst->has_null()); +} +} // namespace doris::vectorized \ No newline at end of file diff --git a/be/test/vec/columns/column_nullable_test.h b/be/test/vec/columns/column_nullable_test.h new file mode 100644 index 00000000000000..0f90a25c9b56b3 --- /dev/null +++ b/be/test/vec/columns/column_nullable_test.h @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include + +#include +#include +#include + +#include "vec/columns/column.h" +#include "vec/columns/column_nullable.h" +#include "vec/columns/column_string.h" +#include "vec/columns/columns_number.h" +#include "vec/core/field.h" +#include "vec/core/types.h" +#include "vec/data_types/data_type.h" +#include "vec/data_types/data_type_date_time.h" +#include "vec/data_types/data_type_string.h" + +namespace doris::vectorized { + +inline std::string generate_random_string(size_t max_length) { + std::srand(std::time(nullptr)); // use current time as seed for random generator + + if (max_length == 0) { + return ""; + } + + auto randbyte = []() -> char { + // generate a random byte, in range [0x00, 0xFF] + return static_cast(rand() % 256); + }; + + std::string str(max_length, 0); + std::generate_n(str.begin(), max_length, randbyte); + + return str; +} + +inline MutableColumnPtr create_null_map(size_t input_rows_count, bool all_null = false, + bool all_not_null = false) { + std::srand(std::time(nullptr)); // use current time as seed for random generator + auto null_map = ColumnUInt8::create(); + for (size_t i = 0; i < input_rows_count; ++i) { + if (all_null) { + null_map->insert(1); + } else if (all_not_null) { + null_map->insert(0); + } else { + null_map->insert(rand() % 2); + } + } + return null_map; +} + +template +inline MutableColumnPtr create_nested_column(size_t input_rows_count) { + MutableColumnPtr column; + if constexpr (std::is_integral_v) { + column = ColumnVector::create(); + } else if constexpr (std::is_same_v) { + column = ColumnString::create(); + } else if constexpr (std::is_same_v) { + column = ColumnDecimal64::create(0, 6); + } + + for (size_t i = 0; i < input_rows_count; ++i) { + if constexpr (std::is_integral_v) { + column->insert(rand() % std::numeric_limits::max()); + } else if constexpr (std::is_same_v) { + column->insert(generate_random_string(rand() % 512)); + } else if constexpr (std::is_same_v) { + column->insert(Int64(rand() % std::numeric_limits::max())); + } else { + throw std::runtime_error("Unsupported type"); + } + } + + return column; +} + +template +inline ColumnNullable::MutablePtr create_column_nullable(size_t input_rows_count, + bool all_null = false, + bool all_not_null = false) { + auto null_map = create_null_map(input_rows_count, all_null, all_not_null); + auto nested_column = create_nested_column(input_rows_count); + return ColumnNullable::create(std::move(nested_column), std::move(null_map)); +} +} // namespace doris::vectorized \ No newline at end of file diff --git a/be/test/vec/columns/column_resize_test.cpp b/be/test/vec/columns/column_resize_test.cpp index dd6f78f3e8d2f0..0b5f6ba183ea3c 100644 --- a/be/test/vec/columns/column_resize_test.cpp +++ b/be/test/vec/columns/column_resize_test.cpp @@ -1,4 +1,3 @@ - // Licensed to the Apache Software 
Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information diff --git a/be/test/vec/core/column_string_test.cpp b/be/test/vec/core/column_string_test.cpp index 81f41bd11c465c..a1967a30ce7224 100644 --- a/be/test/vec/core/column_string_test.cpp +++ b/be/test/vec/core/column_string_test.cpp @@ -48,8 +48,16 @@ TEST(ColumnStringTest, TestConcat) { ColumnNumbers arguments = {0, 1}; FunctionStringConcat func_concat; - auto status = func_concat.execute_impl(nullptr, block, arguments, 2, 3); - EXPECT_TRUE(status.ok()); + auto fn_ctx = FunctionContext::create_context(nullptr, TypeDescriptor {}, {}); + { + auto status = + func_concat.open(fn_ctx.get(), FunctionContext::FunctionStateScope::FRAGMENT_LOCAL); + EXPECT_TRUE(status.ok()); + } + { + auto status = func_concat.execute_impl(fn_ctx.get(), block, arguments, 2, 3); + EXPECT_TRUE(status.ok()); + } auto actual_res_col = block.get_by_position(2).column; EXPECT_EQ(actual_res_col->size(), 3); diff --git a/be/test/vec/function/function_array_element_test.cpp b/be/test/vec/function/function_array_element_test.cpp index 2d89164f55f498..16ce28f52599e1 100644 --- a/be/test/vec/function/function_array_element_test.cpp +++ b/be/test/vec/function/function_array_element_test.cpp @@ -15,20 +15,14 @@ // specific language governing permissions and limitations // under the License. -#include #include -#include -#include "common/status.h" #include "function_test_util.h" -#include "gtest/gtest_pred_impl.h" -#include "testutil/any_type.h" #include "vec/core/field.h" #include "vec/core/types.h" #include "vec/data_types/data_type_date.h" #include "vec/data_types/data_type_date_time.h" #include "vec/data_types/data_type_decimal.h" -#include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_number.h" #include "vec/data_types/data_type_string.h" @@ -137,10 +131,10 @@ TEST(function_array_element_test, element_at) { Array vec = {ut_type::DECIMALFIELD(17014116.67), ut_type::DECIMALFIELD(-17014116.67), ut_type::DECIMALFIELD(0.0)}; DataSet data_set = {{{vec, Int64(0)}, Null()}, - {{vec, Int64(1)}, ut_type::DECIMAL(17014116.67)}, + {{vec, Int64(1)}, ut_type::DECIMALV2(17014116.67)}, {{vec, Int64(4)}, Null()}, - {{vec, Int64(-1)}, ut_type::DECIMAL(0.0)}, - {{vec, Int64(-2)}, ut_type::DECIMAL(-17014116.67)}, + {{vec, Int64(-1)}, ut_type::DECIMALV2(0.0)}, + {{vec, Int64(-2)}, ut_type::DECIMALV2(-17014116.67)}, {{vec, Int64(-4)}, Null()}, {{Null(), Int64(1)}, Null()}, {{empty_arr, Int64(0)}, Null()}, diff --git a/be/test/vec/function/function_array_index_test.cpp b/be/test/vec/function/function_array_index_test.cpp index 92c7901bbfe112..24bd5797869a11 100644 --- a/be/test/vec/function/function_array_index_test.cpp +++ b/be/test/vec/function/function_array_index_test.cpp @@ -16,14 +16,10 @@ // under the License. 
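The column_string_test change above reflects a general pattern in these function tests: a function that keeps per-fragment state needs a FunctionContext that has been open()ed before execute_impl() runs. Roughly, with func, block, arguments, result_idx and input_rows standing in for the test's own objects (a pattern excerpt of the calls used in the hunk, not a complete program):

// Create a context, open the function for the fragment-local scope, then execute.
auto fn_ctx = FunctionContext::create_context(nullptr, TypeDescriptor {}, {});
Status st = func.open(fn_ctx.get(), FunctionContext::FunctionStateScope::FRAGMENT_LOCAL);
EXPECT_TRUE(st.ok());
st = func.execute_impl(fn_ctx.get(), block, arguments, result_idx, input_rows);
EXPECT_TRUE(st.ok());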
#include -#include -#include "common/status.h" #include "function_test_util.h" -#include "testutil/any_type.h" #include "vec/core/field.h" #include "vec/core/types.h" -#include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_number.h" namespace doris::vectorized { @@ -144,10 +140,10 @@ TEST(function_array_index_test, array_contains) { Array vec = {ut_type::DECIMALFIELD(17014116.67), ut_type::DECIMALFIELD(-17014116.67), ut_type::DECIMALFIELD(0.0)}; - DataSet data_set = {{{vec, ut_type::DECIMAL(-17014116.67)}, UInt8(1)}, - {{vec, ut_type::DECIMAL(0)}, UInt8(1)}, - {{Null(), ut_type::DECIMAL(0)}, Null()}, - {{empty_arr, ut_type::DECIMAL(0)}, UInt8(0)}}; + DataSet data_set = {{{vec, ut_type::DECIMALV2(-17014116.67)}, UInt8(1)}, + {{vec, ut_type::DECIMALV2(0)}, UInt8(1)}, + {{Null(), ut_type::DECIMALV2(0)}, Null()}, + {{empty_arr, ut_type::DECIMALV2(0)}, UInt8(0)}}; static_cast(check_function(func_name, input_types, data_set)); } @@ -244,10 +240,10 @@ TEST(function_array_index_test, array_position) { Array vec = {ut_type::DECIMALFIELD(17014116.67), ut_type::DECIMALFIELD(-17014116.67), ut_type::DECIMALFIELD(0)}; - DataSet data_set = {{{vec, ut_type::DECIMAL(-17014116.67)}, Int64(2)}, - {{vec, ut_type::DECIMAL(0)}, Int64(3)}, - {{Null(), ut_type::DECIMAL(0)}, Null()}, - {{empty_arr, ut_type::DECIMAL(0)}, Int64(0)}}; + DataSet data_set = {{{vec, ut_type::DECIMALV2(-17014116.67)}, Int64(2)}, + {{vec, ut_type::DECIMALV2(0)}, Int64(3)}, + {{Null(), ut_type::DECIMALV2(0)}, Null()}, + {{empty_arr, ut_type::DECIMALV2(0)}, Int64(0)}}; static_cast(check_function(func_name, input_types, data_set)); } diff --git a/be/test/vec/function/function_math_test.cpp b/be/test/vec/function/function_math_test.cpp index 00d0770935be19..bb24d141e4c3be 100644 --- a/be/test/vec/function/function_math_test.cpp +++ b/be/test/vec/function/function_math_test.cpp @@ -15,21 +15,13 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include -#include +#include +#include #include #include -#include -#include "common/status.h" #include "function_test_util.h" -#include "gtest/gtest_pred_impl.h" -#include "testutil/any_type.h" #include "vec/core/types.h" -#include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_number.h" #include "vec/data_types/data_type_string.h" @@ -291,7 +283,7 @@ TEST(MathFunctionTest, abs_test) { {{INT(0)}, BIGINT(0)}, {{INT(-60)}, BIGINT(60)}, {{INT(INT_MAX)}, BIGINT(INT_MAX)}, - {{INT(INT_MIN)}, BIGINT(-1ll * INT_MIN)}}; + {{INT(INT_MIN)}, BIGINT(-1LL * INT_MIN)}}; static_cast(check_function(func_name, input_types, data_set)); } @@ -498,7 +490,6 @@ TEST(MathFunctionTest, money_format_test) { static_cast(check_function(func_name, input_types, data_set)); } - { InputTypeSet input_types = {TypeIndex::Int128}; DataSet data_set = {{{Null()}, Null()}, @@ -519,11 +510,19 @@ TEST(MathFunctionTest, money_format_test) { { InputTypeSet input_types = {TypeIndex::Decimal128V2}; DataSet data_set = {{{Null()}, Null()}, - {{DECIMAL(17014116.67)}, VARCHAR("17,014,116.67")}, - {{DECIMAL(-17014116.67)}, VARCHAR("-17,014,116.67")}}; + {{DECIMALV2(17014116.67)}, VARCHAR("17,014,116.67")}, + {{DECIMALV2(-17014116.67)}, VARCHAR("-17,014,116.67")}}; static_cast(check_function(func_name, input_types, data_set)); } + { + BaseInputTypeSet input_types = {TypeIndex::Decimal64}; + DataSet data_set = {{{Null()}, Null()}, + {{DECIMAL64(17014116, 670000000)}, VARCHAR("17,014,116.67")}, + {{DECIMAL64(-17014116, -670000000)}, VARCHAR("-17,014,116.67")}}; + + check_function_all_arg_comb(func_name, input_types, data_set); + } } } // namespace doris::vectorized diff --git a/be/test/vec/function/function_string_test.cpp b/be/test/vec/function/function_string_test.cpp index 20727c6067176e..5d1d6fb9d8b217 100644 --- a/be/test/vec/function/function_string_test.cpp +++ b/be/test/vec/function/function_string_test.cpp @@ -17,21 +17,15 @@ #include #include -#include #include #include #include -#include "common/status.h" #include "function_test_util.h" -#include "gtest/gtest_pred_impl.h" #include "gutil/integral_types.h" -#include "testutil/any_type.h" #include "util/encryption_util.h" #include "vec/core/field.h" #include "vec/core/types.h" -#include "vec/data_types/data_type_date_time.h" -#include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_number.h" #include "vec/data_types/data_type_string.h" @@ -2977,8 +2971,7 @@ TEST(function_string_test, function_uuid_test) { {{std::string("ffffffff-ffff-ffff-ffff-ffffffffffff")}, (__int128)-1}, {{std::string("00000000-0000-0000-0000-000000000000")}, (__int128)0}, {{std::string("123")}, Null()}}; - static_cast(check_function_all_arg_comb(func_name, input_types, - data_set)); + check_function_all_arg_comb(func_name, input_types, data_set); } { std::string func_name = "int_to_uuid"; @@ -3092,68 +3085,6 @@ TEST(function_string_test, function_overlay_test) { } } -//bug TEST(function_string_test, function_strcmp_test) { -// std::string func_name = "strcmp"; -// { -// BaseInputTypeSet input_types = {TypeIndex::String, TypeIndex::String}; - -// DataSet data_set = { -// {{std::string("A"), std::string("A")}, std::int8_t(0)}, -// {{std::string("A"), std::string(",")}, std::int8_t(1)}, -// {{std::string("A"), std::string("")}, std::int8_t(1)}, -// {{std::string("A"), Null()}, Null()}, -// {{std::string("A"), std::string(",ABC,")}, std::int8_t(1)}, -// {{std::string("A"), std::string("123ABC!@# _")}, std::int8_t(1)}, -// 
{{std::string("A"), std::string("10@()*()$*!@")}, std::int8_t(1)}, -// {{std::string(","), std::string("A")}, std::int8_t(-1)}, -// {{std::string(","), std::string(",")}, std::int8_t(0)}, -// {{std::string(","), std::string("")}, std::int8_t(1)}, -// {{std::string(","), Null()}, Null()}, -// {{std::string(","), std::string(",ABC,")}, std::int8_t(-1)}, -// {{std::string(","), std::string("123ABC!@# _")}, std::int8_t(-1)}, -// {{std::string(","), std::string("10@()*()$*!@")}, std::int8_t(-1)}, -// {{std::string(""), std::string("A")}, std::int8_t(-1)}, -// {{std::string(""), std::string(",")}, std::int8_t(-1)}, -// {{std::string(""), std::string("")}, std::int8_t(0)}, -// {{std::string(""), Null()}, Null()}, -// {{std::string(""), std::string(",ABC,")}, std::int8_t(-1)}, -// {{std::string(""), std::string("123ABC!@# _")}, std::int8_t(-1)}, -// {{std::string(""), std::string("10@()*()$*!@")}, std::int8_t(-1)}, -// {{Null(), std::string("A")}, Null()}, -// {{Null(), std::string(",")}, Null()}, -// {{Null(), std::string("")}, Null()}, -// {{Null(), Null()}, Null()}, -// {{Null(), std::string(",ABC,")}, Null()}, -// {{Null(), std::string("123ABC!@# _")}, Null()}, -// {{Null(), std::string("10@()*()$*!@")}, Null()}, -// {{std::string(",ABC,"), std::string("A")}, std::int8_t(-1)}, -// {{std::string(",ABC,"), std::string(",")}, std::int8_t(1)}, -// {{std::string(",ABC,"), std::string("")}, std::int8_t(1)}, -// {{std::string(",ABC,"), Null()}, Null()}, -// {{std::string(",ABC,"), std::string(",ABC,")}, std::int8_t(0)}, -// {{std::string(",ABC,"), std::string("123ABC!@# _")}, std::int8_t(-1)}, -// {{std::string(",ABC,"), std::string("10@()*()$*!@")}, std::int8_t(-1)}, -// {{std::string("123ABC!@# _"), std::string("A")}, std::int8_t(-1)}, -// {{std::string("123ABC!@# _"), std::string(",")}, std::int8_t(1)}, -// {{std::string("123ABC!@# _"), std::string("")}, std::int8_t(1)}, -// {{std::string("123ABC!@# _"), Null()}, Null()}, -// {{std::string("123ABC!@# _"), std::string(",ABC,")}, std::int8_t(1)}, -// {{std::string("123ABC!@# _"), std::string("123ABC!@# _")}, std::int8_t(0)}, -// {{std::string("123ABC!@# _"), std::string("10@()*()$*!@")}, std::int8_t(1)}, -// {{std::string("10@()*()$*!@"), std::string("A")}, std::int8_t(-1)}, -// {{std::string("10@()*()$*!@"), std::string(",")}, std::int8_t(1)}, -// {{std::string("10@()*()$*!@"), std::string("")}, std::int8_t(1)}, -// {{std::string("10@()*()$*!@"), Null()}, Null()}, -// {{std::string("10@()*()$*!@"), std::string(",ABC,")}, std::int8_t(1)}, -// {{std::string("10@()*()$*!@"), std::string("123ABC!@# _")}, std::int8_t(-1)}, -// {{std::string("10@()*()$*!@"), std::string("10@()*()$*!@")}, std::int8_t(0)}, -// }; -// static_cast( -// check_function_all_arg_comb(func_name, input_types, data_set)); -// } -// } -// - TEST(function_string_test, function_initcap) { std::string func_name {"initcap"}; diff --git a/be/test/vec/function/function_test_template.cpp b/be/test/vec/function/function_test_template.cpp new file mode 100644 index 00000000000000..5f2778afc69282 --- /dev/null +++ b/be/test/vec/function/function_test_template.cpp @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include + +#include "function_test_util.h" +#include "vec/core/types.h" +#include "vec/data_types/data_type_number.h" +#include "vec/data_types/data_type_string.h" + +namespace doris::vectorized { + +TEST(FunctionTestTemplate, two_args_template) { + std::string func_name = "atan2"; + + BaseInputTypeSet input_types = {TypeIndex::Float64, TypeIndex::Float64}; + + DataSet data_set = { + {{-1.0, -2.0}, -2.677945044588987}, {{0.0, 0.0}, 0.0}, + {{0.5, 0.5}, 0.7853981633974483}, {{M_PI, M_PI / 2}, 1.1071487177940904}, + {{1e100, 1e-100}, 1.5707963267948966}, {{Null(), Null()}, Null()}}; + + static_cast( + check_function_all_arg_comb(func_name, input_types, data_set)); +} + +TEST(FunctionTestTemplate, three_args_template) { + std::string func_name = "concat"; + + BaseInputTypeSet input_types = {TypeIndex::String, TypeIndex::String, TypeIndex::String}; + + DataSet data_set = {{{std::string(""), std::string(""), std::string("")}, std::string("")}, + {{std::string("123"), std::string("456"), std::string("789")}, + std::string("123456789")}, + {{std::string("123"), Null(), std::string("789")}, Null()}, + {{std::string("中文"), std::string("中文"), std::string("中文")}, + std::string("中文中文中文")}, + {{std::string(" "), std::string(" "), std::string(" ")}, + std::string(" ")}}; + + check_function_all_arg_comb(func_name, input_types, data_set); +} +} // namespace doris::vectorized \ No newline at end of file diff --git a/be/test/vec/function/function_test_util.cpp b/be/test/vec/function/function_test_util.cpp index 88014c7273525b..ddc97c440ed07a 100644 --- a/be/test/vec/function/function_test_util.cpp +++ b/be/test/vec/function/function_test_util.cpp @@ -25,6 +25,7 @@ #include "runtime/runtime_state.h" #include "util/binary_cast.hpp" #include "util/bitmap_value.h" +#include "vec/core/types.h" #include "vec/data_types/data_type_array.h" #include "vec/data_types/data_type_bitmap.h" #include "vec/data_types/data_type_date.h" @@ -64,6 +65,7 @@ uint64_t str_to_datetime_v2(std::string datetime_str, std::string datetime_forma return binary_cast, UInt64>(v); } +// NOLINTBEGIN(readability-function-size) size_t type_index_to_data_type(const std::vector& input_types, size_t index, ut_type::UTDataTypeDesc& ut_desc, DataTypePtr& type) { doris::TypeDescriptor& desc = ut_desc.type_desc; @@ -144,6 +146,23 @@ size_t type_index_to_data_type(const std::vector& input_types, size_t i desc.type = doris::PrimitiveType::TYPE_DECIMALV2; type = std::make_shared>(); return 1; + // for decimals in ut we set the default scale and precision. for more scales, we prefer test them in regression. 
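The cases added below pin one default (precision, scale) per decimal width for unit tests, and the matching DECIMAL32/64/128V3/256 helpers in function_test_util.h build cell values with from_int_frac(int_part, frac_part, scale). A plausible reading consistent with the test data is that frac_part is expressed in units of 10^-scale, so DECIMAL64(17014116, 670000000) at scale 9 denotes 17014116.67. A toy model of that composition (not Doris's Decimal type):

#include <cstdint>

// Toy fixed-point composition mirroring from_int_frac(int_part, frac_part, scale):
// the stored value is int_part * 10^scale + frac_part, read with 'scale' fractional
// digits. Both parts carry the sign for negative numbers.
int64_t compose_decimal64(int64_t int_part, int64_t frac_part, int scale) {
    int64_t pow10 = 1;
    for (int i = 0; i < scale; ++i) {
        pow10 *= 10;
    }
    return int_part * pow10 + frac_part;
}
// compose_decimal64(17014116, 670000000, 9)   ->  17014116670000000  ("17,014,116.67")
// compose_decimal64(-17014116, -670000000, 9) -> -17014116670000000  ("-17,014,116.67")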
+ case TypeIndex::Decimal32: + desc.type = doris::PrimitiveType::TYPE_DECIMAL32; + type = std::make_shared>(9, 5); + return 1; + case TypeIndex::Decimal64: + desc.type = doris::PrimitiveType::TYPE_DECIMAL64; + type = std::make_shared>(18, 9); + return 1; + case TypeIndex::Decimal128V3: + desc.type = doris::PrimitiveType::TYPE_DECIMAL128I; + type = std::make_shared>(38, 20); + return 1; + case TypeIndex::Decimal256: + desc.type = doris::PrimitiveType::TYPE_DECIMAL256; + type = std::make_shared>(76, 40); + return 1; case TypeIndex::DateTime: desc.type = doris::PrimitiveType::TYPE_DATETIME; type = std::make_shared(); @@ -188,6 +207,8 @@ size_t type_index_to_data_type(const std::vector& input_types, size_t i return 0; } } +// NOLINTEND(readability-function-size) + bool parse_ut_data_type(const std::vector& input_types, ut_type::UTDataTypeDescs& descs) { descs.clear(); descs.reserve(input_types.size()); @@ -202,7 +223,7 @@ bool parse_ut_data_type(const std::vector& input_types, ut_type::UTData return false; } if (desc.is_nullable) { - desc.data_type = make_nullable(std::move(desc.data_type)); + desc.data_type = make_nullable(desc.data_type); } desc.col_name = "k" + std::to_string(i); descs.emplace_back(desc); @@ -232,6 +253,7 @@ bool insert_date_cell(MutableColumnPtr& column, const std::string& format, const return true; } +// NOLINTBEGIN(readability-function-size) bool insert_cell(MutableColumnPtr& column, DataTypePtr type_ptr, const AnyType& cell) { if (cell.type() == &typeid(Null)) { column->insert_data(nullptr, 0); @@ -250,7 +272,7 @@ bool insert_cell(MutableColumnPtr& column, DataTypePtr type_ptr, const AnyType& JsonBinaryValue jsonb_val(str.c_str(), str.size()); column->insert_data(jsonb_val.value(), jsonb_val.size()); } else if (type.idx == TypeIndex::BitMap) { - BitmapValue* bitmap = any_cast(cell); + auto* bitmap = any_cast(cell); column->insert_data((char*)bitmap, sizeof(BitmapValue)); } else if (type.is_ipv4()) { auto value = any_cast(cell); @@ -283,7 +305,19 @@ bool insert_cell(MutableColumnPtr& column, DataTypePtr type_ptr, const AnyType& auto value = any_cast(cell); column->insert_data(reinterpret_cast(&value), 0); } else if (type.is_decimal128v2()) { - auto value = any_cast>(cell); + auto value = any_cast(cell); + column->insert_data(reinterpret_cast(&value), 0); + } else if (type.is_decimal32()) { + auto value = any_cast(cell); + column->insert_data(reinterpret_cast(&value), 0); + } else if (type.is_decimal64()) { + auto value = any_cast(cell); + column->insert_data(reinterpret_cast(&value), 0); + } else if (type.is_decimal128v3()) { + auto value = any_cast(cell); + column->insert_data(reinterpret_cast(&value), 0); + } else if (type.is_decimal256()) { + auto value = any_cast(cell); column->insert_data(reinterpret_cast(&value), 0); } else if (type.is_date_time()) { static std::string date_time_format("%Y-%m-%d %H:%i:%s"); @@ -310,6 +344,7 @@ bool insert_cell(MutableColumnPtr& column, DataTypePtr type_ptr, const AnyType& } return true; } +// NOLINTEND(readability-function-size) Block* create_block_from_inputset(const InputTypeSet& input_types, const InputDataSet& input_set) { // 1.0 create data type diff --git a/be/test/vec/function/function_test_util.h b/be/test/vec/function/function_test_util.h index 62666d7c67c5be..0f2b53a68d2384 100644 --- a/be/test/vec/function/function_test_util.h +++ b/be/test/vec/function/function_test_util.h @@ -19,22 +19,17 @@ #include #include -#include -#include #include #include #include -#include #include #include #include #include #include 
"common/status.h" -#include "gtest/gtest_pred_impl.h" #include "olap/olap_common.h" #include "runtime/define_primitive_type.h" -#include "runtime/exec_env.h" #include "runtime/types.h" #include "testutil/any_type.h" #include "testutil/function_utils.h" @@ -52,12 +47,14 @@ #include "vec/core/column_with_type_and_name.h" #include "vec/core/field.h" #include "vec/core/types.h" +#include "vec/core/wide_integer.h" #include "vec/data_types/data_type.h" #include "vec/data_types/data_type_bitmap.h" #include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_number.h" #include "vec/data_types/data_type_string.h" #include "vec/functions/simple_function_factory.h" + namespace doris::vectorized { class DataTypeJsonb; @@ -107,7 +104,17 @@ using FLOAT = float; using IPV4 = uint32_t; using IPV6 = uint128_t; -inline auto DECIMAL = Decimal128V2::double_to_decimal; +// cell constructors. could also use from_int_frac if you'd like +inline auto DECIMALV2 = Decimal128V2::double_to_decimal; +inline auto DECIMAL32 = [](int32_t x, int32_t y) { return Decimal32::from_int_frac(x, y, 5); }; +inline auto DECIMAL64 = [](int64_t x, int64_t y) { return Decimal64::from_int_frac(x, y, 9); }; +inline auto DECIMAL128V3 = [](int128_t x, int128_t y) { + return Decimal128V3::from_int_frac(x, y, 20); +}; +inline auto DECIMAL256 = [](wide::Int256 x, wide::Int256 y) { + return Decimal256::from_int_frac(x, y, 40); +}; + inline auto DECIMALFIELD = [](double v) { return DecimalField(Decimal128V2::double_to_decimal(v), 9); }; @@ -272,6 +279,16 @@ Status check_function(const std::string& func_name, const InputTypeSet& input_ty fn_ctx_return.type = doris::PrimitiveType::TYPE_DATEV2; } else if (std::is_same_v) { fn_ctx_return.type = doris::PrimitiveType::TYPE_DATETIMEV2; + } else if (std::is_same_v) { + fn_ctx_return.type = doris::PrimitiveType::TYPE_DECIMALV2; + } else if (std::is_same_v) { + fn_ctx_return.type = doris::PrimitiveType::TYPE_DECIMAL32; + } else if (std::is_same_v) { + fn_ctx_return.type = doris::PrimitiveType::TYPE_DECIMAL64; + } else if (std::is_same_v) { + fn_ctx_return.type = doris::PrimitiveType::TYPE_DECIMAL128I; + } else if (std::is_same_v) { + fn_ctx_return.type = doris::PrimitiveType::TYPE_DECIMAL256; } else { fn_ctx_return.type = doris::PrimitiveType::INVALID_TYPE; } diff --git a/be/test/vec/function/table_function_test.cpp b/be/test/vec/function/table_function_test.cpp index 95983e4d31d1ff..a5c49dbdba94d6 100644 --- a/be/test/vec/function/table_function_test.cpp +++ b/be/test/vec/function/table_function_test.cpp @@ -20,14 +20,12 @@ #include #include -#include #include #include #include #include "common/status.h" #include "exprs/mock_vexpr.h" -#include "gtest/gtest_pred_impl.h" #include "testutil/any_type.h" #include "vec/core/field.h" #include "vec/core/types.h" @@ -46,8 +44,8 @@ using ::testing::SetArgPointee; class TableFunctionTest : public testing::Test { protected: - virtual void SetUp() {} - virtual void TearDown() {} + void SetUp() override {} + void TearDown() override {} void clear() { _ctx = nullptr; @@ -118,8 +116,8 @@ TEST_F(TableFunctionTest, vexplode_outer) { InputTypeSet output_types = {TypeIndex::Decimal128V2}; InputDataSet output_set = {{Null()}, {Null()}, - {ut_type::DECIMAL(17014116.67)}, - {ut_type::DECIMAL(-17014116.67)}}; + {ut_type::DECIMALV2(17014116.67)}, + {ut_type::DECIMALV2(-17014116.67)}}; check_vec_table_function(&explode_outer, input_types, input_set, output_types, output_set); } diff --git a/build.sh b/build.sh index c8aa2bf2c43626..1da5df76bb2fdd 100755 --- 
a/build.sh +++ b/build.sh @@ -885,6 +885,9 @@ if [[ ${BUILD_CLOUD} -eq 1 ]]; then cp -r -p "${DORIS_HOME}/cloud/output" "${DORIS_HOME}/output/ms" fi +mkdir -p "${DORIS_HOME}/output/tools" +cp -r -p tools/fdb "${DORIS_HOME}/output/tools" + echo "***************************************" echo "Successfully build Doris" echo "***************************************" diff --git a/cloud/CMakeLists.txt b/cloud/CMakeLists.txt index 7a273dd04e29c4..32e60f7bfb5467 100644 --- a/cloud/CMakeLists.txt +++ b/cloud/CMakeLists.txt @@ -29,7 +29,7 @@ include(cppcheck) if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64") set (ARCH_AMD64 1) endif () -if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)") +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*)") set (ARCH_AARCH64 1) endif () if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm") diff --git a/cloud/src/common/bvars.cpp b/cloud/src/common/bvars.cpp index 21b91416a65b72..5658a34f3f5d07 100644 --- a/cloud/src/common/bvars.cpp +++ b/cloud/src/common/bvars.cpp @@ -72,6 +72,7 @@ BvarLatencyRecorderWithTag g_bvar_ms_update_delete_bitmap("ms", "update_delete_b BvarLatencyRecorderWithTag g_bvar_ms_get_delete_bitmap("ms", "get_delete_bitmap"); BvarLatencyRecorderWithTag g_bvar_ms_get_delete_bitmap_update_lock("ms", "get_delete_bitmap_update_lock"); +BvarLatencyRecorderWithTag g_bvar_ms_remove_delete_bitmap("ms", "remove_delete_bitmap"); BvarLatencyRecorderWithTag g_bvar_ms_get_instance("ms", "get_instance"); BvarLatencyRecorderWithTag g_bvar_ms_get_rl_task_commit_attach("ms", "get_rl_task_commit_attach"); BvarLatencyRecorderWithTag g_bvar_ms_reset_rl_progress("ms", "reset_rl_progress"); diff --git a/cloud/src/common/bvars.h b/cloud/src/common/bvars.h index c5067b053b362e..6f368ff06ebef7 100644 --- a/cloud/src/common/bvars.h +++ b/cloud/src/common/bvars.h @@ -171,6 +171,7 @@ extern BvarLatencyRecorderWithTag g_bvar_ms_finish_tablet_job; extern BvarLatencyRecorderWithTag g_bvar_ms_update_delete_bitmap; extern BvarLatencyRecorderWithTag g_bvar_ms_get_delete_bitmap; extern BvarLatencyRecorderWithTag g_bvar_ms_get_delete_bitmap_update_lock; +extern BvarLatencyRecorderWithTag g_bvar_ms_remove_delete_bitmap; extern BvarLatencyRecorderWithTag g_bvar_ms_get_cluster_status; extern BvarLatencyRecorderWithTag g_bvar_ms_set_cluster_status; extern BvarLatencyRecorderWithTag g_bvar_ms_get_instance; diff --git a/cloud/src/meta-service/meta_service.cpp b/cloud/src/meta-service/meta_service.cpp index 5f3d64b24c9b2c..17ba2953ce952d 100644 --- a/cloud/src/meta-service/meta_service.cpp +++ b/cloud/src/meta-service/meta_service.cpp @@ -1748,18 +1748,20 @@ void MetaServiceImpl::update_delete_bitmap(google::protobuf::RpcController* cont return; } - // 1. Check whether the lock expires - if (!check_delete_bitmap_lock(code, msg, ss, txn, instance_id, table_id, request->lock_id(), - request->initiator())) { - LOG(WARNING) << "failed to check delete bitmap lock, table_id=" << table_id - << " request lock_id=" << request->lock_id() - << " request initiator=" << request->initiator() << " msg" << msg; - return; - } - - // 2. Process pending delete bitmap - if (!process_pending_delete_bitmap(code, msg, ss, txn, instance_id, tablet_id)) { - return; + bool unlock = request->has_unlock() ? request->unlock() : false; + if (!unlock) { + // 1. 
Check whether the lock expires + if (!check_delete_bitmap_lock(code, msg, ss, txn, instance_id, table_id, request->lock_id(), + request->initiator())) { + LOG(WARNING) << "failed to check delete bitmap lock, table_id=" << table_id + << " request lock_id=" << request->lock_id() + << " request initiator=" << request->initiator() << " msg " << msg; + return; + } + // 2. Process pending delete bitmap + if (!process_pending_delete_bitmap(code, msg, ss, txn, instance_id, tablet_id)) { + return; + } } // 3. store all pending delete bitmap for this txn @@ -1791,6 +1793,8 @@ void MetaServiceImpl::update_delete_bitmap(google::protobuf::RpcController* cont } // 4. Update delete bitmap for curent txn + size_t total_key = 0; + size_t total_size = 0; for (size_t i = 0; i < request->rowset_ids_size(); ++i) { auto& key = delete_bitmap_keys.delete_bitmap_keys(i); auto& val = request->segment_delete_bitmaps(i); @@ -1814,19 +1818,23 @@ void MetaServiceImpl::update_delete_bitmap(google::protobuf::RpcController* cont msg = "failed to init txn"; return; } - if (!check_delete_bitmap_lock(code, msg, ss, txn, instance_id, table_id, - request->lock_id(), request->initiator())) { - LOG(WARNING) << "failed to check delete bitmap lock, table_id=" << table_id - << " request lock_id=" << request->lock_id() - << " request initiator=" << request->initiator() << " msg" << msg; - return; + if (!unlock) { + if (!check_delete_bitmap_lock(code, msg, ss, txn, instance_id, table_id, + request->lock_id(), request->initiator())) { + LOG(WARNING) << "failed to check delete bitmap lock, table_id=" << table_id + << " request lock_id=" << request->lock_id() + << " request initiator=" << request->initiator() << " msg" << msg; + return; + } } } // splitting large values (>90*1000) into multiple KVs cloud::put(txn.get(), key, val, 0); fdb_txn_size = fdb_txn_size + key.size() + val.size(); - LOG(INFO) << "xxx update delete bitmap put delete_bitmap_key=" << hex(key) - << " lock_id=" << request->lock_id() << " value_size: " << val.size(); + total_key++; + total_size += key.size() + val.size(); + VLOG_DEBUG << "xxx update delete bitmap put delete_bitmap_key=" << hex(key) + << " lock_id=" << request->lock_id() << " value_size: " << val.size(); } err = txn->commit(); @@ -1836,6 +1844,9 @@ void MetaServiceImpl::update_delete_bitmap(google::protobuf::RpcController* cont msg = ss.str(); return; } + LOG(INFO) << "update_delete_bitmap tablet_id=" << tablet_id << " lock_id=" << request->lock_id() + << " rowset_num=" << request->rowset_ids_size() << " total_key=" << total_key + << " total_size=" << total_size << " unlock=" << unlock; } void MetaServiceImpl::get_delete_bitmap(google::protobuf::RpcController* controller, @@ -2080,6 +2091,63 @@ void MetaServiceImpl::get_delete_bitmap_update_lock(google::protobuf::RpcControl } } +void MetaServiceImpl::remove_delete_bitmap(google::protobuf::RpcController* controller, + const RemoveDeleteBitmapRequest* request, + RemoveDeleteBitmapResponse* response, + ::google::protobuf::Closure* done) { + RPC_PREPROCESS(remove_delete_bitmap); + std::string cloud_unique_id = request->has_cloud_unique_id() ? 
request->cloud_unique_id() : ""; + if (cloud_unique_id.empty()) { + code = MetaServiceCode::INVALID_ARGUMENT; + msg = "cloud unique id not set"; + return; + } + + instance_id = get_instance_id(resource_mgr_, cloud_unique_id); + if (instance_id.empty()) { + code = MetaServiceCode::INVALID_ARGUMENT; + msg = "empty instance_id"; + LOG(WARNING) << msg << ", cloud_unique_id=" << request->cloud_unique_id(); + return; + } + RPC_RATE_LIMIT(remove_delete_bitmap) + auto tablet_id = request->tablet_id(); + auto& rowset_ids = request->rowset_ids(); + auto& begin_versions = request->begin_versions(); + auto& end_versions = request->end_versions(); + if (rowset_ids.size() != begin_versions.size() || rowset_ids.size() != end_versions.size()) { + code = MetaServiceCode::INVALID_ARGUMENT; + ss << "rowset and version size not match. " + << " rowset_size=" << rowset_ids.size() + << " begin_version_size=" << begin_versions.size() + << " end_version_size=" << end_versions.size(); + msg = ss.str(); + return; + } + std::unique_ptr txn; + TxnErrorCode err = txn_kv_->create_txn(&txn); + if (err != TxnErrorCode::TXN_OK) { + LOG(WARNING) << "failed to init txn"; + return; + } + for (size_t i = 0; i < rowset_ids.size(); i++) { + auto delete_bitmap_start = meta_delete_bitmap_key( + {instance_id, tablet_id, rowset_ids[i], begin_versions[i], 0}); + auto delete_bitmap_end = meta_delete_bitmap_key( + {instance_id, tablet_id, rowset_ids[i], end_versions[i], INT64_MAX}); + txn->remove(delete_bitmap_start, delete_bitmap_end); + } + err = txn->commit(); + if (err != TxnErrorCode::TXN_OK) { + code = cast_as(err); + ss << "failed to commit job kv, err=" << err; + msg = ss.str(); + return; + } + LOG(INFO) << "remove_delete_bitmap,tablet_id=" << tablet_id + << ",rowset_num=" << rowset_ids.size(); +} + std::pair MetaServiceImpl::get_instance_info( const std::string& instance_id, const std::string& cloud_unique_id, InstanceInfoPB* instance) { diff --git a/cloud/src/meta-service/meta_service.h b/cloud/src/meta-service/meta_service.h index 5415e7ce1e6962..a632d48efcc57d 100644 --- a/cloud/src/meta-service/meta_service.h +++ b/cloud/src/meta-service/meta_service.h @@ -265,6 +265,11 @@ class MetaServiceImpl : public cloud::MetaService { GetDeleteBitmapUpdateLockResponse* response, ::google::protobuf::Closure* done) override; + void remove_delete_bitmap(google::protobuf::RpcController* controller, + const RemoveDeleteBitmapRequest* request, + RemoveDeleteBitmapResponse* response, + ::google::protobuf::Closure* done) override; + // cloud control get cluster's status by this api void get_cluster_status(google::protobuf::RpcController* controller, const GetClusterStatusRequest* request, @@ -631,6 +636,13 @@ class MetaServiceProxy final : public MetaService { done); } + void remove_delete_bitmap(google::protobuf::RpcController* controller, + const RemoveDeleteBitmapRequest* request, + RemoveDeleteBitmapResponse* response, + ::google::protobuf::Closure* done) override { + call_impl(&cloud::MetaService::remove_delete_bitmap, controller, request, response, done); + } + // cloud control get cluster's status by this api void get_cluster_status(google::protobuf::RpcController* controller, const GetClusterStatusRequest* request, diff --git a/cloud/src/meta-service/meta_service_helper.h b/cloud/src/meta-service/meta_service_helper.h index 664d400b236c25..bb1f6197f1d3b0 100644 --- a/cloud/src/meta-service/meta_service_helper.h +++ b/cloud/src/meta-service/meta_service_helper.h @@ -58,6 +58,9 @@ void begin_rpc(std::string_view func_name, 
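On the remove_delete_bitmap RPC above: it clears delete-bitmap KVs with one range remove per rowset. Because the key ends with the version and then the segment id, bounding the range at segment 0 of begin_version and segment INT64_MAX of end_version covers every segment of every version in between, so no per-segment iteration is needed. Condensed from the hunk above (same calls, trimmed for readability):

// For each rowset, drop every delete-bitmap key in
// [{tablet, rowset, begin_version, seg 0}, {tablet, rowset, end_version, seg INT64_MAX}].
for (size_t i = 0; i < rowset_ids.size(); ++i) {
    auto start = meta_delete_bitmap_key(
            {instance_id, tablet_id, rowset_ids[i], begin_versions[i], 0});
    auto end = meta_delete_bitmap_key(
            {instance_id, tablet_id, rowset_ids[i], end_versions[i], INT64_MAX});
    txn->remove(start, end); // range remove; the transaction commits once after the loop
}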
brpc::Controller* ctrl, const Request std::is_same_v) { VLOG_DEBUG << "begin " << func_name << " from " << ctrl->remote_side() << " request=" << req->ShortDebugString(); + } else if constexpr (std::is_same_v) { + LOG(INFO) << "begin " << func_name << " from " << ctrl->remote_side() + << " tablet_id=" << req->tablet_id() << " rowset_size=" << req->rowset_ids_size(); } else { LOG(INFO) << "begin " << func_name << " from " << ctrl->remote_side() << " request=" << req->ShortDebugString(); diff --git a/cloud/src/meta-service/meta_service_tablet_stats.cpp b/cloud/src/meta-service/meta_service_tablet_stats.cpp index 572ee6ebb68cda..501cecbab76d52 100644 --- a/cloud/src/meta-service/meta_service_tablet_stats.cpp +++ b/cloud/src/meta-service/meta_service_tablet_stats.cpp @@ -82,9 +82,16 @@ void internal_get_tablet_stats(MetaServiceCode& code, std::string& msg, Transact int get_detached_tablet_stats(const std::vector>& stats_kvs, TabletStats& detached_stats) { - if (stats_kvs.size() != 5 && stats_kvs.size() != 1) { - LOG(WARNING) << "incorrect tablet stats_kvs, it should be 1 or 5 size=" << stats_kvs.size(); + bool unexpected_size = false; + // clang-format off + if (stats_kvs.size() != 5 // aggregated stats and 4 splitted stats: num_rowsets num_segs data_size num_rows + && stats_kvs.size() != 2 // aggregated stats and 1 splitted stats: num_rowsets + && stats_kvs.size() != 1 // aggregated stats only (nothing has been imported since created) + ) { + unexpected_size = true; } + // clang-format on + std::stringstream ss; for (size_t i = 1; i < stats_kvs.size(); ++i) { std::string_view k(stats_kvs[i].first), v(stats_kvs[i].second); k.remove_prefix(1); @@ -111,6 +118,8 @@ int get_detached_tablet_stats(const std::vector> $DORIS_HOME/be/conf/be.conf +mkdir -p /opt/apache-doris/be/cache +echo 'file_cache_path = [{"path":"/opt/apache-doris/be/cache","total_size":107374182400,"query_limit":107374182400}]' >> $DORIS_HOME/be/conf/be.conf function log_stderr() { diff --git a/docker/runtime/doris-compose/cluster.py b/docker/runtime/doris-compose/cluster.py index 0ce12f3c7d4d19..3a2d95ac986bfd 100644 --- a/docker/runtime/doris-compose/cluster.py +++ b/docker/runtime/doris-compose/cluster.py @@ -49,6 +49,8 @@ IP_PART4_SIZE = 200 +CLUSTER_ID = "12345678" + LOG = utils.get_logger() @@ -412,7 +414,7 @@ def get_add_init_config(self): if self.cluster.sql_mode_node_mgr: cfg += [ - "cloud_instance_id = " + self.cloud_instance_id(), + "cluster_id = " + CLUSTER_ID, ] else: cfg += [ @@ -439,9 +441,6 @@ def docker_env(self): def cloud_unique_id(self): return "sql_server_{}".format(self.id) - def cloud_instance_id(self): - return "reg_cloud_instance" - def entrypoint(self): return ["bash", os.path.join(DOCKER_RESOURCE_PATH, "init_fe.sh")] @@ -484,9 +483,9 @@ def get_add_init_config(self): "meta_service_endpoint = {}".format( self.cluster.get_meta_server_addr()), ] - if self.cluster.be_cloud_instanceid: + if self.cluster.be_cluster_id: cfg += [ - "cloud_instance_id = " + self.cloud_instance_id(), + "cluster_id = " + CLUSTER_ID, ] if not self.cluster.sql_mode_node_mgr: cfg += [ @@ -553,9 +552,6 @@ def docker_env(self): def cloud_unique_id(self): return "compute_node_{}".format(self.id) - def cloud_instance_id(self): - return "reg_cloud_instance" - def docker_home_dir(self): return os.path.join(DOCKER_DORIS_PATH, "be") @@ -666,7 +662,7 @@ class Cluster(object): def __init__(self, name, subnet, image, is_cloud, fe_config, be_config, ms_config, recycle_config, fe_follower, be_disks, be_cluster, reg_be, coverage_dir, 
cloud_store_config, sql_mode_node_mgr, - be_metaservice_endpoint, be_cloud_instanceid): + be_metaservice_endpoint, be_cluster_id): self.name = name self.subnet = subnet self.image = image @@ -687,13 +683,13 @@ def __init__(self, name, subnet, image, is_cloud, fe_config, be_config, } self.sql_mode_node_mgr = sql_mode_node_mgr self.be_metaservice_endpoint = be_metaservice_endpoint - self.be_cloud_instanceid = be_cloud_instanceid + self.be_cluster_id = be_cluster_id @staticmethod def new(name, image, is_cloud, fe_config, be_config, ms_config, recycle_config, fe_follower, be_disks, be_cluster, reg_be, coverage_dir, cloud_store_config, sql_mode_node_mgr, - be_metaservice_endpoint, be_cloud_instanceid): + be_metaservice_endpoint, be_cluster_id): if not os.path.exists(LOCAL_DORIS_PATH): os.makedirs(LOCAL_DORIS_PATH, exist_ok=True) os.chmod(LOCAL_DORIS_PATH, 0o777) @@ -707,7 +703,7 @@ def new(name, image, is_cloud, fe_config, be_config, ms_config, fe_follower, be_disks, be_cluster, reg_be, coverage_dir, cloud_store_config, sql_mode_node_mgr, be_metaservice_endpoint, - be_cloud_instanceid) + be_cluster_id) os.makedirs(cluster.get_path(), exist_ok=True) os.makedirs(get_status_path(name), exist_ok=True) cluster._save_meta() diff --git a/docker/runtime/doris-compose/command.py b/docker/runtime/doris-compose/command.py index e162368bf657e2..3a5afc714ddda3 100644 --- a/docker/runtime/doris-compose/command.py +++ b/docker/runtime/doris-compose/command.py @@ -322,19 +322,19 @@ def add_parser(self, args_parsers): if self._support_boolean_action(): parser.add_argument( - "--be-cloud-instanceid", + "--be-cluster-id", default=True, action=self._get_parser_bool_action(False), help= - "Do not set BE cloud instance ID in conf. Default is False.") + "Do not set BE cluster ID in conf. Default is False.") else: parser.add_argument( - "--no-be-cloud-instanceid", - dest='be_cloud_instanceid', + "--no-be-cluster-id", + dest='be_cluster_id', default=True, action=self._get_parser_bool_action(False), help= - "Do not set BE cloud instance ID in conf. Default is False.") + "Do not set BE cluser ID in conf. Default is False.") parser.add_argument( "--fdb-version", @@ -434,7 +434,7 @@ def run(self, args): args.be_config, args.ms_config, args.recycle_config, args.fe_follower, args.be_disks, args.be_cluster, args.reg_be, args.coverage_dir, cloud_store_config, args.sql_mode_node_mgr, - args.be_metaservice_endpoint, args.be_cloud_instanceid) + args.be_metaservice_endpoint, args.be_cluster_id) LOG.info("Create new cluster {} succ, cluster path is {}".format( args.NAME, cluster.get_path())) diff --git a/docker/thirdparties/docker-compose/hive/hadoop-hive-2x.env.tpl b/docker/thirdparties/docker-compose/hive/hadoop-hive-2x.env.tpl new file mode 100644 index 00000000000000..6222972176a9e1 --- /dev/null +++ b/docker/thirdparties/docker-compose/hive/hadoop-hive-2x.env.tpl @@ -0,0 +1,17 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + diff --git a/docker/thirdparties/docker-compose/hive/hadoop-hive-3x.env.tpl b/docker/thirdparties/docker-compose/hive/hadoop-hive-3x.env.tpl new file mode 100644 index 00000000000000..84bfce1754f77d --- /dev/null +++ b/docker/thirdparties/docker-compose/hive/hadoop-hive-3x.env.tpl @@ -0,0 +1,22 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +HIVE_SITE_CONF_hive_metastore_event_db_notification_api_auth=false +HIVE_SITE_CONF_hive_metastore_dml_events=true +HIVE_SITE_CONF_hive_metastore_transactional_event_listeners=org.apache.hive.hcatalog.listener.DbNotificationListener + diff --git a/docker/thirdparties/docker-compose/hive/hadoop-hive.env.tpl b/docker/thirdparties/docker-compose/hive/hadoop-hive.env.tpl index b7e662f5e524bf..0e07422841098a 100644 --- a/docker/thirdparties/docker-compose/hive/hadoop-hive.env.tpl +++ b/docker/thirdparties/docker-compose/hive/hadoop-hive.env.tpl @@ -28,9 +28,6 @@ HIVE_SITE_CONF_hive_server2_webui_port=0 HIVE_SITE_CONF_hive_compactor_initiator_on=true HIVE_SITE_CONF_hive_compactor_worker_threads=2 HIVE_SITE_CONF_metastore_storage_schema_reader_impl=org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader -HIVE_SITE_CONF_hive_metastore_event_db_notification_api_auth=false -HIVE_SITE_CONF_hive_metastore_dml_events=true -HIVE_SITE_CONF_hive_metastore_transactional_event_listeners=org.apache.hive.hcatalog.listener.DbNotificationListener CORE_CONF_fs_defaultFS=hdfs://${IP_HOST}:${FS_PORT} CORE_CONF_hadoop_http_staticuser_user=root diff --git a/docker/thirdparties/docker-compose/hive/hive-2x.yaml.tpl b/docker/thirdparties/docker-compose/hive/hive-2x.yaml.tpl index 5eddbb81c53d6f..acc253e70272f7 100644 --- a/docker/thirdparties/docker-compose/hive/hive-2x.yaml.tpl +++ b/docker/thirdparties/docker-compose/hive/hive-2x.yaml.tpl @@ -24,7 +24,7 @@ services: environment: - CLUSTER_NAME=test env_file: - - ./hadoop-hive.env + - ./hadoop-hive-2x.env container_name: ${CONTAINER_UID}hadoop2-namenode expose: - "50070" @@ -39,7 +39,7 @@ services: datanode: image: bde2020/hadoop-datanode:2.0.0-hadoop2.7.4-java8 env_file: - - ./hadoop-hive.env + - ./hadoop-hive-2x.env environment: SERVICE_PRECONDITION: "${IP_HOST}:50070" container_name: ${CONTAINER_UID}hadoop2-datanode @@ -55,7 +55,7 @@ services: hive-server: image: bde2020/hive:2.3.2-postgresql-metastore env_file: - - ./hadoop-hive.env + - ./hadoop-hive-2x.env environment: 
HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:postgresql://${IP_HOST}:${PG_PORT}/metastore" SERVICE_PRECONDITION: "${IP_HOST}:${HMS_PORT}" @@ -76,7 +76,7 @@ services: hive-metastore: image: bde2020/hive:2.3.2-postgresql-metastore env_file: - - ./hadoop-hive.env + - ./hadoop-hive-2x.env command: /bin/bash /mnt/scripts/hive-metastore.sh environment: SERVICE_PRECONDITION: "${IP_HOST}:50070 ${IP_HOST}:50075 ${IP_HOST}:${PG_PORT}" diff --git a/docker/thirdparties/docker-compose/hive/hive-3x.yaml.tpl b/docker/thirdparties/docker-compose/hive/hive-3x.yaml.tpl index 57033c3b2d5294..1f6eca76d431df 100644 --- a/docker/thirdparties/docker-compose/hive/hive-3x.yaml.tpl +++ b/docker/thirdparties/docker-compose/hive/hive-3x.yaml.tpl @@ -24,7 +24,7 @@ services: environment: - CLUSTER_NAME=test env_file: - - ./hadoop-hive.env + - ./hadoop-hive-3x.env container_name: ${CONTAINER_UID}hadoop3-namenode expose: - "9870" @@ -39,7 +39,7 @@ services: datanode: image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8 env_file: - - ./hadoop-hive.env + - ./hadoop-hive-3x.env environment: SERVICE_PRECONDITION: "${IP_HOST}:9870" container_name: ${CONTAINER_UID}hadoop3-datanode @@ -55,7 +55,7 @@ services: hive-server: image: doristhirdpartydocker/hive:3.1.2-postgresql-metastore env_file: - - ./hadoop-hive.env + - ./hadoop-hive-3x.env environment: HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:postgresql://${IP_HOST}:${PG_PORT}/metastore" SERVICE_PRECONDITION: "${IP_HOST}:${HMS_PORT}" @@ -76,7 +76,7 @@ services: hive-metastore: image: doristhirdpartydocker/hive:3.1.2-postgresql-metastore env_file: - - ./hadoop-hive.env + - ./hadoop-hive-3x.env command: /bin/bash /mnt/scripts/hive-metastore.sh environment: SERVICE_PRECONDITION: "${IP_HOST}:9870 ${IP_HOST}:9864 ${IP_HOST}:${PG_PORT}" diff --git a/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh b/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh index 8338f467952831..aa27516164e9fd 100755 --- a/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh +++ b/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh @@ -40,7 +40,7 @@ find "${DATA_DIR}" -type f -name "run.sh" -print0 | xargs -0 -n 1 -P 10 -I {} sh if [[ ! -d "/mnt/scripts/tpch1.db" ]]; then echo "/mnt/scripts/tpch1.db does not exist" cd /mnt/scripts/ - curl -O https://doris-build-hk-1308700295.cos.ap-hongkong.myqcloud.com/regression/datalake/pipeline_data/tpch1.db.tar.gz + curl -O https://doris-regression-hk.oss-cn-hongkong.aliyuncs.com/regression/datalake/pipeline_data/tpch1.db.tar.gz tar -zxf tpch1.db.tar.gz rm -rf tpch1.db.tar.gz cd - @@ -53,17 +53,14 @@ fi hadoop fs -mkdir -p /user/doris/ hadoop fs -put /mnt/scripts/tpch1.db /user/doris/ -# if you test in your local,better use # to annotation section about paimon -if [[ ! -d "/mnt/scripts/paimon1" ]]; then - echo "/mnt/scripts/paimon1 does not exist" - cd /mnt/scripts/ - curl -O https://doris-build-hk-1308700295.cos.ap-hongkong.myqcloud.com/regression/datalake/pipeline_data/paimon1.tar.gz - tar -zxf paimon1.tar.gz - rm -rf paimon1.tar.gz - cd - -else - echo "/mnt/scripts/paimon1 exist, continue !" 
-fi +# paimon data file is small and updated frequently, so we download it every time +rm -rf "/mnt/scripts/paimon1" +echo "/mnt/scripts/paimon1 does not exist" +cd /mnt/scripts/ +curl -O https://doris-regression-hk.oss-cn-hongkong.aliyuncs.com/regression/datalake/pipeline_data/paimon1.tar.gz +tar -zxf paimon1.tar.gz +rm -rf paimon1.tar.gz +cd - ## put paimon1 hadoop fs -put /mnt/scripts/paimon1 /user/doris/ diff --git a/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/run06.sql b/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/run06.sql new file mode 100644 index 00000000000000..7aa4170eab0985 --- /dev/null +++ b/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/run06.sql @@ -0,0 +1,25 @@ +use paimon; +create database if not exists test_paimon_spark; +use test_paimon_spark; + +drop table if exists test_tb_mix_format; +create table test_tb_mix_format ( + id int, + value int, + par string +) PARTITIONED BY (par) TBLPROPERTIES ( + 'primary-key' = 'id, par', + 'bucket'=1000, + 'file.format'='orc' +); +-- orc format in partition a +insert into test_tb_mix_format values (1,1,'a'),(2,1,'a'),(3,1,'a'),(4,1,'a'),(5,1,'a'),(6,1,'a'),(7,1,'a'),(8,1,'a'),(9,1,'a'),(10,1,'a'); +-- update some data, these splits will be read by jni +insert into test_tb_mix_format values (1,2,'a'),(2,2,'a'),(3,2,'a'),(4,2,'a'),(5,2,'a'); +-- parquet format in partition b +alter table test_tb_mix_format set TBLPROPERTIES ('file.format'='parquet'); +insert into test_tb_mix_format values (1,1,'b'),(2,1,'b'),(3,1,'b'),(4,1,'b'),(5,1,'b'),(6,1,'b'),(7,1,'b'),(8,1,'b'),(9,1,'b'),(10,1,'b'); +-- update some data, these splits will be read by jni +insert into test_tb_mix_format values (1,2,'b'),(2,2,'b'),(3,2,'b'),(4,2,'b'),(5,2,'b'); +-- delete format in table properties, doris should get format by file name +alter table test_tb_mix_format unset TBLPROPERTIES ('file.format'); \ No newline at end of file diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index dd31689b3bf4f2..36d2486a1c32be 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -354,10 +354,11 @@ if [[ "${RUN_HIVE2}" -eq 1 ]]; then export CONTAINER_UID=${CONTAINER_UID} . 
"${ROOT}"/docker-compose/hive/hive-2x_settings.env envsubst <"${ROOT}"/docker-compose/hive/hive-2x.yaml.tpl >"${ROOT}"/docker-compose/hive/hive-2x.yaml - envsubst <"${ROOT}"/docker-compose/hive/hadoop-hive.env.tpl >"${ROOT}"/docker-compose/hive/hadoop-hive.env - sudo docker compose -p ${CONTAINER_UID}hive2 -f "${ROOT}"/docker-compose/hive/hive-2x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive.env down + envsubst <"${ROOT}"/docker-compose/hive/hadoop-hive.env.tpl >"${ROOT}"/docker-compose/hive/hadoop-hive-2x.env + envsubst <"${ROOT}"/docker-compose/hive/hadoop-hive-2x.env.tpl >> "${ROOT}"/docker-compose/hive/hadoop-hive-2x.env + sudo docker compose -p ${CONTAINER_UID}hive2 -f "${ROOT}"/docker-compose/hive/hive-2x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive-2x.env down if [[ "${STOP}" -ne 1 ]]; then - sudo docker compose -p ${CONTAINER_UID}hive2 -f "${ROOT}"/docker-compose/hive/hive-2x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive.env up --build --remove-orphans -d + sudo docker compose -p ${CONTAINER_UID}hive2 -f "${ROOT}"/docker-compose/hive/hive-2x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive-2x.env up --build --remove-orphans -d fi fi @@ -392,10 +393,11 @@ if [[ "${RUN_HIVE3}" -eq 1 ]]; then export CONTAINER_UID=${CONTAINER_UID} . "${ROOT}"/docker-compose/hive/hive-3x_settings.env envsubst <"${ROOT}"/docker-compose/hive/hive-3x.yaml.tpl >"${ROOT}"/docker-compose/hive/hive-3x.yaml - envsubst <"${ROOT}"/docker-compose/hive/hadoop-hive.env.tpl >"${ROOT}"/docker-compose/hive/hadoop-hive.env - sudo docker compose -p ${CONTAINER_UID}hive3 -f "${ROOT}"/docker-compose/hive/hive-3x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive.env down + envsubst <"${ROOT}"/docker-compose/hive/hadoop-hive.env.tpl >"${ROOT}"/docker-compose/hive/hadoop-hive-3x.env + envsubst <"${ROOT}"/docker-compose/hive/hadoop-hive-3x.env.tpl >> "${ROOT}"/docker-compose/hive/hadoop-hive-3x.env + sudo docker compose -p ${CONTAINER_UID}hive3 -f "${ROOT}"/docker-compose/hive/hive-3x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive-3x.env down if [[ "${STOP}" -ne 1 ]]; then - sudo docker compose -p ${CONTAINER_UID}hive3 -f "${ROOT}"/docker-compose/hive/hive-3x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive.env up --build --remove-orphans -d + sudo docker compose -p ${CONTAINER_UID}hive3 -f "${ROOT}"/docker-compose/hive/hive-3x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive-3x.env up --build --remove-orphans -d fi fi diff --git a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java index 3e681fa1519166..6e38992657144f 100644 --- a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java +++ b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java @@ -100,6 +100,9 @@ public BaseJdbcExecutor(byte[] thriftParams) throws Exception { } public void close() throws Exception { + if (outputTable != null) { + outputTable.close(); + } try { if (stmt != null && !stmt.isClosed()) { try { @@ -112,8 +115,8 @@ public void close() throws Exception { if (conn != null && resultSet != null) { abortReadConnection(conn, resultSet); } - closeResources(resultSet, stmt, conn); } finally { + closeResources(resultSet, stmt, conn); if (config.getConnectionPoolMinSize() == 0 && hikariDataSource != null) { hikariDataSource.close(); 
JdbcDataSource.getDataSource().getSourcesMap().remove(config.createCacheKey()); diff --git a/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeColumnValue.java b/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeColumnValue.java index 0c3a9283a8152f..98900edce5a546 100644 --- a/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeColumnValue.java +++ b/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeColumnValue.java @@ -68,6 +68,10 @@ public MaxComputeColumnValue() { idx = 0; } + public void setColumnIdx(int idx) { + this.idx = idx; + } + public MaxComputeColumnValue(ValueVector valueVector, int i) { this.column = valueVector; this.idx = i; @@ -89,79 +93,58 @@ public boolean isNull() { return column.isNull(idx); } - private void skippedIfNull() { - // null has been process by appendValue with isNull() - try { - if (column.isNull(idx)) { - idx++; - } - } catch (IndexOutOfBoundsException e) { - // skip left rows - idx++; - } - } - @Override public boolean getBoolean() { - skippedIfNull(); BitVector bitCol = (BitVector) column; - return bitCol.get(idx++) != 0; + return bitCol.get(idx) != 0; } @Override public byte getByte() { - skippedIfNull(); TinyIntVector tinyIntCol = (TinyIntVector) column; - return tinyIntCol.get(idx++); + return tinyIntCol.get(idx); } @Override public short getShort() { - skippedIfNull(); SmallIntVector smallIntCol = (SmallIntVector) column; - return smallIntCol.get(idx++); + return smallIntCol.get(idx); } @Override public int getInt() { - skippedIfNull(); IntVector intCol = (IntVector) column; - return intCol.get(idx++); + return intCol.get(idx); } @Override public float getFloat() { - skippedIfNull(); Float4Vector floatCol = (Float4Vector) column; - return floatCol.get(idx++); + return floatCol.get(idx); } @Override public long getLong() { - skippedIfNull(); BigIntVector longCol = (BigIntVector) column; - return longCol.get(idx++); + return longCol.get(idx); } @Override public double getDouble() { - skippedIfNull(); Float8Vector doubleCol = (Float8Vector) column; - return doubleCol.get(idx++); + return doubleCol.get(idx); } @Override public BigInteger getBigInteger() { - skippedIfNull(); BigIntVector longCol = (BigIntVector) column; - return BigInteger.valueOf(longCol.get(idx++)); + return BigInteger.valueOf(longCol.get(idx)); } @Override public BigDecimal getDecimal() { - skippedIfNull(); DecimalVector decimalCol = (DecimalVector) column; - return getBigDecimalFromArrowBuf(column.getDataBuffer(), idx++, + return getBigDecimalFromArrowBuf(column.getDataBuffer(), idx, decimalCol.getScale(), DecimalVector.TYPE_WIDTH); } @@ -195,26 +178,23 @@ public static BigDecimal getBigDecimalFromArrowBuf(ArrowBuf byteBuf, int index, @Override public String getString() { - skippedIfNull(); VarCharVector varcharCol = (VarCharVector) column; - String v = varcharCol.getObject(idx++).toString(); + String v = varcharCol.getObject(idx).toString(); return v == null ? new String(new byte[0]) : v; } public String getChar() { - skippedIfNull(); VarCharVector varcharCol = (VarCharVector) column; - return varcharCol.getObject(idx++).toString().stripTrailing(); + return varcharCol.getObject(idx).toString().stripTrailing(); } // Maybe I can use `appendBytesAndOffset(byte[] src, int offset, int length)` to reduce the creation of byte[]. // But I haven't figured out how to write it elegantly. 
public byte[] getCharAsBytes() { - skippedIfNull(); VarCharVector varcharCol = (VarCharVector) column; - byte[] v = varcharCol.getObject(idx++).getBytes(); + byte[] v = varcharCol.getObject(idx).getBytes(); if (v == null) { return new byte[0]; @@ -230,31 +210,28 @@ public byte[] getCharAsBytes() { @Override public byte[] getStringAsBytes() { - skippedIfNull(); VarCharVector varcharCol = (VarCharVector) column; - byte[] v = varcharCol.getObject(idx++).getBytes(); + byte[] v = varcharCol.getObject(idx).getBytes(); return v == null ? new byte[0] : v; } @Override public LocalDate getDate() { - skippedIfNull(); DateDayVector dateCol = (DateDayVector) column; - Integer intVal = dateCol.getObject(idx++); + Integer intVal = dateCol.getObject(idx); return LocalDate.ofEpochDay(intVal == null ? 0 : intVal); } @Override public LocalDateTime getDateTime() { - skippedIfNull(); LocalDateTime result; ArrowType.Timestamp timestampType = ( ArrowType.Timestamp) column.getField().getFieldType().getType(); if (timestampType.getUnit() == org.apache.arrow.vector.types.TimeUnit.MILLISECOND) { - result = convertToLocalDateTime((TimeStampMilliTZVector) column, idx++); + result = convertToLocalDateTime((TimeStampMilliTZVector) column, idx); } else { NullableTimeStampNanoHolder valueHoder = new NullableTimeStampNanoHolder(); - ((TimeStampNanoVector) column).get(idx++, valueHoder); + ((TimeStampNanoVector) column).get(idx, valueHoder); long timestampNanos = valueHoder.value; result = LocalDateTime.ofEpochSecond(timestampNanos / 1_000_000_000, @@ -264,10 +241,10 @@ public LocalDateTime getDateTime() { /* timestampType.getUnit() result = switch (timestampType.getUnit()) { - case MICROSECOND -> convertToLocalDateTime((TimeStampMicroTZVector) column, idx++); - case SECOND -> convertToLocalDateTime((TimeStampSecTZVector) column, idx++); - case MILLISECOND -> convertToLocalDateTime((TimeStampMilliTZVector) column, idx++); - case NANOSECOND -> convertToLocalDateTime((TimeStampNanoTZVector) column, idx++); + case MICROSECOND -> convertToLocalDateTime((TimeStampMicroTZVector) column, idx); + case SECOND -> convertToLocalDateTime((TimeStampSecTZVector) column, idx); + case MILLISECOND -> convertToLocalDateTime((TimeStampMilliTZVector) column, idx); + case NANOSECOND -> convertToLocalDateTime((TimeStampNanoTZVector) column, idx); }; Because : @@ -287,9 +264,9 @@ public LocalDateTime getDateTime() { TIMESTAMP_NTZ is NTZ => column is TimeStampNanoVector So: - case SECOND -> convertToLocalDateTime((TimeStampSecTZVector) column, idx++); - case MICROSECOND -> convertToLocalDateTime((TimeStampMicroTZVector) column, idx++); - case NANOSECOND -> convertToLocalDateTime((TimeStampNanoTZVector) column, idx++); + case SECOND -> convertToLocalDateTime((TimeStampSecTZVector) column, idx); + case MICROSECOND -> convertToLocalDateTime((TimeStampMicroTZVector) column, idx); + case NANOSECOND -> convertToLocalDateTime((TimeStampNanoTZVector) column, idx); may never be used. */ @@ -298,15 +275,13 @@ public LocalDateTime getDateTime() { @Override public byte[] getBytes() { - skippedIfNull(); VarBinaryVector binaryCol = (VarBinaryVector) column; - byte[] v = binaryCol.getObject(idx++); + byte[] v = binaryCol.getObject(idx); return v == null ? 
new byte[0] : v; } @Override public void unpackArray(List values) { - skippedIfNull(); ListVector listCol = (ListVector) column; int elemSize = listCol.getObject(idx).size(); for (int i = 0; i < elemSize; i++) { @@ -314,12 +289,10 @@ public void unpackArray(List values) { values.add(val); offset++; } - idx++; } @Override public void unpackMap(List keys, List values) { - skippedIfNull(); MapVector mapCol = (MapVector) column; int elemSize = mapCol.getElementEndIndex(idx) - mapCol.getElementStartIndex(idx); List innerCols = ((StructVector) mapCol.getDataVector()).getChildrenFromFields(); @@ -332,19 +305,16 @@ public void unpackMap(List keys, List values) { values.add(val); offset++; } - idx++; } @Override public void unpackStruct(List structFieldIndex, List values) { - skippedIfNull(); StructVector structCol = (StructVector) column; List innerCols = structCol.getChildrenFromFields(); for (Integer fieldIndex : structFieldIndex) { MaxComputeColumnValue val = new MaxComputeColumnValue(innerCols.get(fieldIndex), idx); values.add(val); } - idx++; } public static LocalDateTime convertToLocalDateTime(TimeStampMilliTZVector milliTZVector, int index) { diff --git a/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeJniScanner.java b/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeJniScanner.java index df8066a9fa3241..c72153c3449aac 100644 --- a/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeJniScanner.java +++ b/fe/be-java-extensions/max-compute-scanner/src/main/java/org/apache/doris/maxcompute/MaxComputeJniScanner.java @@ -226,6 +226,7 @@ private int readVectors(int expectedRows) throws IOException { } columnValue.reset(column); for (int j = 0; j < batchRows; j++) { + columnValue.setColumnIdx(j); appendData(readColumnId, columnValue); } } diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java index 97f9cdc70b516a..6f6068043893ab 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java @@ -323,7 +323,7 @@ public class Config extends ConfigBase { + "The connection is abandoned if the clock skew is larger than this value."}) public static long max_bdbje_clock_delta_ms = 5000; // 5s - @ConfField(description = {"是否启用所有 http 接口的认证", + @ConfField(mutable = true, description = {"是否启用所有 http 接口的认证", "Whether to enable all http interface authentication"}, varType = VariableAnnotation.EXPERIMENTAL) public static boolean enable_all_http_auth = false; @@ -496,6 +496,10 @@ public class Config extends ConfigBase { "print log interval for publish transaction failed interval"}) public static long publish_fail_log_interval_second = 5 * 60; + @ConfField(mutable = true, masterOnly = true, description = {"一个 PUBLISH_VERSION 任务打印失败日志的次数上限", + "the upper limit of failure logs of PUBLISH_VERSION task"}) + public static long publish_version_task_failed_log_threshold = 80; + @ConfField(mutable = true, masterOnly = true, description = {"提交事务的最大超时时间,单位是秒。" + "该参数仅用于事务型 insert 操作中。", "Maximal waiting time for all data inserted before one transaction to be committed, in seconds. 
" @@ -1207,6 +1211,12 @@ public class Config extends ConfigBase { @ConfField(mutable = true, masterOnly = true) public static int max_routine_load_task_num_per_be = 1024; + /** + * routine load timeout is equal to maxBatchIntervalS * routine_load_task_timeout_multiplier. + */ + @ConfField(mutable = true, masterOnly = true) + public static int routine_load_task_timeout_multiplier = 10; + /** * the max timeout of get kafka meta. */ @@ -2313,6 +2323,12 @@ public class Config extends ConfigBase { @ConfField public static long ranger_cache_size = 10000; + @ConfField(description = { + "鉴权插件配置文件路径,需在 DORIS_HOME 下,默认为 conf/authorization.conf", + "Authorization plugin configuration file path, need to be in DORIS_HOME," + + "default is conf/authorization.conf"}) + public static String authorization_config_file_path = "conf/authorization.conf"; + /** * This configuration is used to enable the statistics of query information, which will record * the access status of databases, tables, and columns, and can be used to guide the @@ -2726,6 +2742,12 @@ public class Config extends ConfigBase { }) public static int profile_async_collect_expire_time_secs = 5; + @ConfField(description = { + "用于控制 ProfileManager 进行 Profile 垃圾回收的间隔时间,垃圾回收期间 ProfileManager 会把多余的以及过期的 profile " + + "从内存和磁盘中清理掉,节省内存。", + "Used to control the interval time of ProfileManager for profile garbage collection. " + }) + public static int profile_manager_gc_interval_seconds = 1; // Used to check compatibility when upgrading. @ConfField public static boolean enable_check_compatibility_mode = false; @@ -2803,16 +2825,12 @@ public class Config extends ConfigBase { public static String deploy_mode = ""; // compatibily with elder version. - // cloud_unique_id is introduced before cloud_instance_id, so it has higher priority. + // cloud_unique_id has higher priority than cluster_id. @ConfField public static String cloud_unique_id = ""; - // If cloud_unique_id is empty, cloud_instance_id works, otherwise cloud_unique_id works. - @ConfField - public static String cloud_instance_id = ""; - public static boolean isCloudMode() { - return deploy_mode.equals("cloud") || !cloud_unique_id.isEmpty() || !cloud_instance_id.isEmpty(); + return deploy_mode.equals("cloud") || !cloud_unique_id.isEmpty(); } public static boolean isNotCloudMode() { @@ -2887,6 +2905,8 @@ public static int metaServiceRpcRetryTimes() { @ConfField public static boolean enable_cloud_snapshot_version = true; + // Interval in seconds for checking the status of compute groups (cloud clusters). + // Compute groups and cloud clusters refer to the same concept. 
@ConfField public static int cloud_cluster_check_interval_second = 10; @@ -2970,7 +2990,7 @@ public static int metaServiceRpcRetryTimes() { public static String security_checker_class_name = ""; @ConfField(mutable = true) - public static int mow_insert_into_commit_retry_times = 10; + public static int mow_calculate_delete_bitmap_retry_times = 10; @ConfField(mutable = true, description = {"指定S3 Load endpoint白名单, 举例: s3_load_endpoint_white_list=a,b,c", "the white list for the s3 load endpoint, if it is empty, no white list will be set," @@ -3004,7 +3024,7 @@ public static int metaServiceRpcRetryTimes() { // to control the max num of values inserted into cache hotspot internal table // insert into cache table when the size of batch values reaches this limit @ConfField(mutable = true) - public static long batch_insert_cluster_cache_hotspot_num = 50; + public static long batch_insert_cluster_cache_hotspot_num = 1000; /** * intervals between be status checks for CloudUpgradeMgr @@ -3042,6 +3062,15 @@ public static int metaServiceRpcRetryTimes() { @ConfField(mutable = true, description = {"存算分离模式下,当tablet分布的be异常,是否立即映射tablet到新的be上,默认true"}) public static boolean enable_immediate_be_assign = true; + @ConfField(mutable = true, description = {"存算分离模式下是否启用自动启停功能,默认true", + "Whether to enable the automatic start-stop feature in cloud mode, default is true."}) + public static boolean enable_auto_start_for_cloud_cluster = true; + + @ConfField(mutable = true, description = {"存算分离模式下自动启停等待cluster唤醒退避重试次数,默认300次大约5分钟", + "The automatic start-stop wait time for cluster wake-up backoff retry count in the cloud " + + "mode is set to 300 times, which is approximately 5 minutes by default."}) + public static int auto_start_wait_to_resume_times = 300; + // ATTN: DONOT add any config not related to cloud mode here // ATTN: DONOT add any config not related to cloud mode here // ATTN: DONOT add any config not related to cloud mode here diff --git a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 index 9695c34ebbdbbb..befa2997624417 100644 --- a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 +++ b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 @@ -114,6 +114,7 @@ BINARY: 'BINARY'; BINLOG: 'BINLOG'; BITAND: 'BITAND'; BITMAP: 'BITMAP'; +BITMAP_EMPTY: 'BITMAP_EMPTY'; BITMAP_UNION: 'BITMAP_UNION'; BITOR: 'BITOR'; BITXOR: 'BITXOR'; @@ -153,6 +154,7 @@ COMMITTED: 'COMMITTED'; COMPACT: 'COMPACT'; COMPLETE: 'COMPLETE'; COMPRESS_TYPE: 'COMPRESS_TYPE'; +COMPUTE: 'COMPUTE'; CONDITIONS: 'CONDITIONS'; CONFIG: 'CONFIG'; CONNECTION: 'CONNECTION'; @@ -553,6 +555,7 @@ VARIABLE: 'VARIABLE'; VARIABLES: 'VARIABLES'; VARIANT: 'VARIANT'; VAULT: 'VAULT'; +VAULTS: 'VAULTS'; VERBOSE: 'VERBOSE'; VERSION: 'VERSION'; VIEW: 'VIEW'; diff --git a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 index e1157b1f432f8d..29496dcc893fd6 100644 --- a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 +++ b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 @@ -154,7 +154,7 @@ supportedCreateStatement properties=propertyClause? (BROKER extProperties=propertyClause)? (AS query)? #createTable - | CREATE VIEW (IF NOT EXISTS)? name=multipartIdentifier + | CREATE (OR REPLACE)? VIEW (IF NOT EXISTS)? name=multipartIdentifier (LEFT_PAREN cols=simpleColumnDefs RIGHT_PAREN)? (COMMENT STRING_LITERAL)? 
AS query #createView | CREATE (EXTERNAL)? TABLE (IF NOT EXISTS)? name=multipartIdentifier @@ -183,8 +183,9 @@ unsupportedOtherStatement | UNINSTALL PLUGIN name=identifierOrText #uninstallPlugin | LOCK TABLES (lockTable (COMMA lockTable)*)? #lockTables | UNLOCK TABLES #unlockTables - | WARM UP CLUSTER destination=identifier WITH - (CLUSTER source=identifier | (warmUpItem (COMMA warmUpItem)*)) FORCE? #warmUpCluster + | WARM UP (CLUSTER | COMPUTE GROUP) destination=identifier WITH + ((CLUSTER | COMPUTE GROUP) source=identifier | + (warmUpItem (COMMA warmUpItem)*)) FORCE? #warmUpCluster | BACKUP SNAPSHOT label=multipartIdentifier TO repo=identifier ((ON | EXCLUDE) LEFT_PAREN baseTableRef (COMMA baseTableRef)* RIGHT_PAREN)? properties=propertyClause? #backup @@ -208,7 +209,7 @@ unsupportedShowStatement | SHOW ROW POLICY (FOR (userIdentify | (ROLE role=identifier)))? #showRowPolicy | SHOW STORAGE POLICY (USING (FOR policy=identifierOrText)?)? #showStoragePolicy | SHOW STAGES #showStages - | SHOW STORAGE VAULT #showStorageVault + | SHOW STORAGE (VAULT | VAULTS) #showStorageVault | SHOW CREATE REPOSITORY FOR identifier #showCreateRepository | SHOW WHITELIST #showWhitelist | SHOW (GLOBAL | SESSION | LOCAL)? VARIABLES wildWhere? #showVariables @@ -307,7 +308,7 @@ unsupportedShowStatement | (FROM tableName=multipartIdentifier (ALL VERBOSE?)?))? #showQueryStats | SHOW BUILD INDEX ((FROM | IN) database=multipartIdentifier)? wildWhere? sortClause? limitClause? #showBuildIndex - | SHOW CLUSTERS #showClusters + | SHOW (CLUSTERS | (COMPUTE GROUPS)) #showClusters | SHOW CONVERT_LSC ((FROM | IN) database=multipartIdentifier)? #showConvertLsc | SHOW REPLICA STATUS FROM baseTableRef wildWhere? #showReplicaStatus | SHOW REPLICA DISTRIBUTION FROM baseTableRef #showREplicaDistribution @@ -495,13 +496,13 @@ unsupportedGrantRevokeStatement : GRANT privilegeList ON multipartIdentifierOrAsterisk TO (userIdentify | ROLE STRING_LITERAL) #grantTablePrivilege | GRANT privilegeList ON - (RESOURCE | CLUSTER | STAGE | STORAGE VAULT | WORKLOAD GROUP) + (RESOURCE | CLUSTER | COMPUTE GROUP | STAGE | STORAGE VAULT | WORKLOAD GROUP) identifierOrTextOrAsterisk TO (userIdentify | ROLE STRING_LITERAL) #grantResourcePrivilege | GRANT roles+=STRING_LITERAL (COMMA roles+=STRING_LITERAL)* TO userIdentify #grantRole | REVOKE privilegeList ON multipartIdentifierOrAsterisk FROM (userIdentify | ROLE STRING_LITERAL) #grantTablePrivilege | REVOKE privilegeList ON - (RESOURCE | CLUSTER | STAGE | STORAGE VAULT | WORKLOAD GROUP) + (RESOURCE | CLUSTER | COMPUTE GROUP | STAGE | STORAGE VAULT | WORKLOAD GROUP) identifierOrTextOrAsterisk FROM (userIdentify | ROLE STRING_LITERAL) #grantResourcePrivilege | REVOKE roles+=STRING_LITERAL (COMMA roles+=STRING_LITERAL)* FROM userIdentify #grantRole ; @@ -1323,7 +1324,7 @@ columnDef ((GENERATED ALWAYS)? AS LEFT_PAREN generatedExpr=expression RIGHT_PAREN)? ((NOT)? nullable=NULL)? (AUTO_INCREMENT (LEFT_PAREN autoIncInitValue=number RIGHT_PAREN)?)? - (DEFAULT (nullValue=NULL | INTEGER_VALUE | DECIMAL_VALUE | PI | E | stringValue=STRING_LITERAL + (DEFAULT (nullValue=NULL | INTEGER_VALUE | DECIMAL_VALUE | PI | E | BITMAP_EMPTY | stringValue=STRING_LITERAL | CURRENT_DATE | defaultTimestamp=CURRENT_TIMESTAMP (LEFT_PAREN defaultValuePrecision=number RIGHT_PAREN)?))? (ON UPDATE CURRENT_TIMESTAMP (LEFT_PAREN onUpdateValuePrecision=number RIGHT_PAREN)?)? (COMMENT comment=STRING_LITERAL)? 
@@ -1787,6 +1788,7 @@ nonReserved | BIN | BITAND | BITMAP + | BITMAP_EMPTY | BITMAP_UNION | BITOR | BITXOR @@ -1820,6 +1822,7 @@ nonReserved | COMPACT | COMPLETE | COMPRESS_TYPE + | COMPUTE | CONDITIONS | CONFIG | CONNECTION @@ -2082,6 +2085,7 @@ nonReserved | VARIABLES | VARIANT | VAULT + | VAULTS | VERBOSE | VERSION | VIEW diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 29a05856ff3b8e..0ffcaf0c488547 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -319,6 +319,7 @@ terminal String KW_COMPACT, KW_COMPLETE, KW_COMPRESS_TYPE, + KW_COMPUTE, KW_CONFIG, KW_CONNECTION, KW_CONNECTION_ID, @@ -691,7 +692,8 @@ terminal String KW_LINES, KW_IGNORE, KW_CONVERT_LSC, - KW_VAULT; + KW_VAULT, + KW_VAULTS; terminal COMMA, COLON, DOT, DOTDOTDOT, AT, STAR, LPAREN, RPAREN, SEMICOLON, LBRACKET, RBRACKET, LBRACE, RBRACE, DIVIDE, MOD, ADD, SUBTRACT, PLACEHOLDER, ARROW; terminal BITAND, BITOR, BITXOR, BITNOT; @@ -973,6 +975,7 @@ nonterminal List> opt_with_analysis_properties; nonterminal String opt_db, procedure_or_function, opt_comment, opt_comment_null, opt_engine; nonterminal ColumnDef.DefaultValue opt_default_value; +nonterminal Boolean opt_or_replace; nonterminal Boolean opt_if_exists, opt_if_not_exists; nonterminal Boolean opt_external; nonterminal Boolean opt_force; @@ -1481,6 +1484,14 @@ warm_up_stmt ::= {: RESULT = new WarmUpClusterStmt(dstClusterName, list, force); :} + | KW_WARM KW_UP KW_COMPUTE KW_GROUP ident:dstClusterName KW_WITH KW_COMPUTE KW_GROUP ident:srcClusterName opt_force:force + {: + RESULT = new WarmUpClusterStmt(dstClusterName, srcClusterName, force); + :} + | KW_WARM KW_UP KW_COMPUTE KW_GROUP ident:dstClusterName KW_WITH warm_up_list:list opt_force:force + {: + RESULT = new WarmUpClusterStmt(dstClusterName, list, force); + :} ; warm_up_item ::= @@ -1968,10 +1979,10 @@ create_stmt ::= {: RESULT = new CreateUserStmt(ifNotExists, user, userRole, passwdOptions, comment); :} - | KW_CREATE KW_VIEW opt_if_not_exists:ifNotExists table_name:viewName + | KW_CREATE opt_or_replace:orReplace KW_VIEW opt_if_not_exists:ifNotExists table_name:viewName opt_col_with_comment_list:columns opt_comment:comment KW_AS query_stmt:view_def {: - RESULT = new CreateViewStmt(ifNotExists, viewName, columns, comment, view_def); + RESULT = new CreateViewStmt(ifNotExists, orReplace, viewName, columns, comment, view_def); :} | KW_CREATE opt_read_only:isReadOnly KW_REPOSITORY ident:repoName KW_WITH storage_backend:storage {: @@ -3050,6 +3061,14 @@ grant_stmt ::= {: RESULT = new GrantStmt(null, role, resourcePattern, privs, ResourceTypeEnum.CLUSTER); :} + | KW_GRANT privilege_list:privs KW_ON KW_COMPUTE KW_GROUP resource_pattern:resourcePattern KW_TO user_identity:userId + {: + RESULT = new GrantStmt(userId, null, resourcePattern, privs, ResourceTypeEnum.CLUSTER); + :} + | KW_GRANT privilege_list:privs KW_ON KW_COMPUTE KW_GROUP resource_pattern:resourcePattern KW_TO KW_ROLE STRING_LITERAL:role + {: + RESULT = new GrantStmt(null, role, resourcePattern, privs, ResourceTypeEnum.CLUSTER); + :} | KW_GRANT privilege_list:privs KW_ON KW_STAGE resource_pattern:resourcePattern KW_TO user_identity:userId {: RESULT = new GrantStmt(userId, null, resourcePattern, privs, ResourceTypeEnum.STAGE); @@ -3154,6 +3173,14 @@ revoke_stmt ::= {: RESULT = new RevokeStmt(null, role, resourcePattern, privs, ResourceTypeEnum.CLUSTER); :} + | KW_REVOKE privilege_list:privs KW_ON KW_COMPUTE KW_GROUP resource_pattern:resourcePattern KW_FROM 
user_identity:userId + {: + RESULT = new RevokeStmt(userId, null, resourcePattern, privs, ResourceTypeEnum.CLUSTER); + :} + | KW_REVOKE privilege_list:privs KW_ON KW_COMPUTE KW_GROUP resource_pattern:resourcePattern KW_FROM KW_ROLE STRING_LITERAL:role + {: + RESULT = new RevokeStmt(null, role, resourcePattern, privs, ResourceTypeEnum.CLUSTER); + :} | KW_REVOKE privilege_list:privs KW_ON KW_STAGE resource_pattern:resourcePattern KW_FROM user_identity:userId {: RESULT = new RevokeStmt(userId, null, resourcePattern, privs, ResourceTypeEnum.STAGE); @@ -4090,6 +4117,16 @@ opt_index_type ::= :} ; +opt_or_replace ::= + {: + RESULT = false; + :} + | KW_OR KW_REPLACE + {: + RESULT = true; + :} + ; + opt_if_exists ::= {: RESULT = false; @@ -4190,6 +4227,10 @@ show_stmt ::= {: RESULT = new ShowStorageVaultStmt(); :} + | KW_SHOW KW_STORAGE KW_VAULTS + {: + RESULT = new ShowStorageVaultStmt(); + :} ; show_param ::= @@ -4693,7 +4734,12 @@ show_param ::= /* Cloud Cluster */ | KW_CLUSTERS {: - RESULT = new ShowClusterStmt(); + RESULT = new ShowClusterStmt(false); + :} + /* Compute Group */ + | KW_COMPUTE KW_GROUPS + {: + RESULT = new ShowClusterStmt(true); :} | KW_CONVERT_LSC opt_db:db {: @@ -8422,6 +8468,8 @@ keyword ::= {: RESULT = id; :} | KW_CLUSTERS:id {: RESULT = id; :} + | KW_COMPUTE:id + {: RESULT = id; :} | KW_LINK:id {: RESULT = id; :} | KW_MIGRATE:id @@ -8518,6 +8566,8 @@ keyword ::= {: RESULT = id; :} | KW_VAULT:id {: RESULT = id; :} + | KW_VAULTS:id + {: RESULT = id; :} | KW_VARIANT:id {: RESULT = id; :} | KW_IPV4:id diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java index 672932f1877551..72dc8141e27b98 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java @@ -729,13 +729,12 @@ public void replayModifyViewDef(AlterViewInfo alterViewInfo) throws MetaNotFound try { String viewName = view.getName(); view.setInlineViewDefWithSqlMode(inlineViewDef, alterViewInfo.getSqlMode()); - try { - view.init(); - } catch (UserException e) { - throw new DdlException("failed to init view stmt, reason=" + e.getMessage()); - } view.setNewFullSchema(newFullSchema); + // We do not need to init view here. + // During the `init` phase, some `Alter-View` statements will access the remote file system, + // but they should not access it during the metadata replay phase. 
+ db.unregisterTable(viewName); db.registerTable(view); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/CloudRollupJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/CloudRollupJobV2.java index 57143ed47d72f6..9914dfc6529aa4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/CloudRollupJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/CloudRollupJobV2.java @@ -226,7 +226,7 @@ private void createRollupReplicaForPartition(OlapTable tbl) throws Exception { tbl.getRowStoreColumnsUniqueIds(rowStoreColumns), tbl.getEnableMowLightDelete(), null, tbl.rowStorePageSize(), - tbl.variantEnableFlattenNested()); + tbl.variantEnableFlattenNested(), null); requestBuilder.addTabletMetas(builder); } // end for rollupTablets requestBuilder.setDbId(dbId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/CloudSchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/CloudSchemaChangeJobV2.java index 0a59ec4c93cbaf..01e11f6d631837 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/CloudSchemaChangeJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/CloudSchemaChangeJobV2.java @@ -236,6 +236,10 @@ private void createShadowIndexReplicaForPartition(OlapTable tbl) throws Exceptio short shadowShortKeyColumnCount = indexShortKeyMap.get(shadowIdxId); List shadowSchema = indexSchemaMap.get(shadowIdxId); + List clusterKeyIndexes = null; + if (shadowIdxId == tbl.getBaseIndexId() || isShadowIndexOfBase(shadowIdxId, tbl)) { + clusterKeyIndexes = OlapTable.getClusterKeyIndexes(shadowSchema); + } int shadowSchemaHash = indexSchemaVersionAndHashMap.get(shadowIdxId).schemaHash; int shadowSchemaVersion = indexSchemaVersionAndHashMap.get(shadowIdxId).schemaVersion; long originIndexId = indexIdMap.get(shadowIdxId); @@ -267,7 +271,7 @@ private void createShadowIndexReplicaForPartition(OlapTable tbl) throws Exceptio tbl.getEnableMowLightDelete(), tbl.getInvertedIndexFileStorageFormat(), tbl.rowStorePageSize(), - tbl.variantEnableFlattenNested()); + tbl.variantEnableFlattenNested(), clusterKeyIndexes); requestBuilder.addTabletMetas(builder); } // end for rollupTablets requestBuilder.setDbId(dbId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java index 3a29c0c542e297..62eff35787531d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java @@ -233,7 +233,6 @@ protected void createRollupReplica() throws AlterCancelException { TStorageMedium storageMedium = tbl.getPartitionInfo().getDataProperty(partitionId).getStorageMedium(); TTabletType tabletType = tbl.getPartitionInfo().getTabletType(partitionId); MaterializedIndex rollupIndex = entry.getValue(); - Map tabletIdMap = this.partitionIdToBaseRollupTabletIdMap.get(partitionId); for (Tablet rollupTablet : rollupIndex.getTablets()) { long rollupTabletId = rollupTablet.getId(); @@ -276,6 +275,7 @@ protected void createRollupReplica() throws AlterCancelException { if (this.storageFormat != null) { createReplicaTask.setStorageFormat(this.storageFormat); } + // rollup replica does not need to set mow cluster keys batchTask.addTask(createReplicaTask); } // end for rollupReplicas } // end for rollupTablets diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java index 3051d37527f107..43857b2e898d40 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java @@ -310,7 +310,7 @@ private boolean processDropColumn(DropColumnClause alterClause, OlapTable olapTa boolean lightSchemaChange = olapTable.getEnableLightSchemaChange(); /* * UNIQUE: - * Can not drop any key column. + * Can not drop any key column, cluster key column * AGGREGATION: * Can not drp any key column is has value with REPLACE method */ @@ -844,9 +844,6 @@ private void processReorderColumn(ReorderColumnsClause alterClause, OlapTable ol if (!column.isVisible()) { newSchema.add(column); } - if (column.isClusterKey()) { - throw new DdlException("Can not modify column order in Unique data model table"); - } } } if (newSchema.size() != targetIndexSchema.size()) { @@ -2866,11 +2863,7 @@ public void modifyTableLightSchemaChange(String rawSql, Database db, OlapTable o } Env.getCurrentEnv().getEditLog().logModifyTableAddOrDropInvertedIndices(info); // Drop table column stats after light schema change finished. - try { - Env.getCurrentEnv().getAnalysisManager().dropStats(olapTable, null); - } catch (Exception e) { - LOG.info("Failed to drop stats after light schema change. Reason: {}", e.getMessage()); - } + Env.getCurrentEnv().getAnalysisManager().dropStats(olapTable, null); if (isDropIndex) { // send drop rpc to be @@ -2898,11 +2891,7 @@ public void modifyTableLightSchemaChange(String rawSql, Database db, OlapTable o } Env.getCurrentEnv().getEditLog().logModifyTableAddOrDropColumns(info); // Drop table column stats after light schema change finished. - try { - Env.getCurrentEnv().getAnalysisManager().dropStats(olapTable, null); - } catch (Exception e) { - LOG.info("Failed to drop stats after light schema change. Reason: {}", e.getMessage()); - } + Env.getCurrentEnv().getAnalysisManager().dropStats(olapTable, null); } LOG.info("finished modify table's add or drop or modify columns. 
table: {}, job: {}, is replay: {}", olapTable.getName(), jobId, isReplay); diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java index dc25da17b8e2f4..ef78611ae9f750 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java @@ -71,6 +71,7 @@ import com.google.common.collect.Table; import com.google.common.collect.Table.Cell; import com.google.gson.annotations.SerializedName; +import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -216,6 +217,20 @@ private void pruneMeta() { partitionOriginIndexIdMap.clear(); } + protected boolean isShadowIndexOfBase(long shadowIdxId, OlapTable tbl) { + if (indexIdToName.get(shadowIdxId).startsWith(SchemaChangeHandler.SHADOW_NAME_PREFIX)) { + String shadowIndexName = indexIdToName.get(shadowIdxId); + String indexName = shadowIndexName + .substring(SchemaChangeHandler.SHADOW_NAME_PREFIX.length()); + long indexId = tbl.getIndexIdByName(indexName); + LOG.info("shadow index id: {}, shadow index name: {}, pointer to index id: {}, index name: {}, " + + "base index id: {}, table_id: {}", shadowIdxId, shadowIndexName, indexId, indexName, + tbl.getBaseIndexId(), tbl.getId()); + return indexId == tbl.getBaseIndexId(); + } + return false; + } + protected void createShadowIndexReplica() throws AlterCancelException { Database db = Env.getCurrentInternalCatalog() .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist")); @@ -261,6 +276,10 @@ protected void createShadowIndexReplica() throws AlterCancelException { short shadowShortKeyColumnCount = indexShortKeyMap.get(shadowIdxId); List shadowSchema = indexSchemaMap.get(shadowIdxId); + List clusterKeyIndexes = null; + if (shadowIdxId == tbl.getBaseIndexId() || isShadowIndexOfBase(shadowIdxId, tbl)) { + clusterKeyIndexes = OlapTable.getClusterKeyIndexes(shadowSchema); + } int shadowSchemaHash = indexSchemaVersionAndHashMap.get(shadowIdxId).schemaHash; long originIndexId = indexIdMap.get(shadowIdxId); int originSchemaHash = tbl.getSchemaHashByIndexId(originIndexId); @@ -309,6 +328,11 @@ protected void createShadowIndexReplica() throws AlterCancelException { } createReplicaTask.setInvertedIndexFileStorageFormat(tbl .getInvertedIndexFileStorageFormat()); + if (!CollectionUtils.isEmpty(clusterKeyIndexes)) { + createReplicaTask.setClusterKeyIndexes(clusterKeyIndexes); + LOG.info("table: {}, partition: {}, index: {}, tablet: {}, cluster key indexes: {}", + tableId, partitionId, shadowIdxId, shadowTabletId, clusterKeyIndexes); + } batchTask.addTask(createReplicaTask); } // end for rollupReplicas } // end for rollupTablets @@ -641,11 +665,7 @@ protected void runRunningJob() throws AlterCancelException { LOG.info("set table's state to NORMAL, table id: {}, job id: {}", tableId, jobId); postProcessOriginIndex(); // Drop table column stats after schema change finished. - try { - Env.getCurrentEnv().getAnalysisManager().dropStats(tbl, null); - } catch (Exception e) { - LOG.info("Failed to drop stats after schema change finished. 
Reason: {}", e.getMessage()); - } + Env.getCurrentEnv().getAnalysisManager().dropStats(tbl, null); } private void onFinished(OlapTable tbl) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BaseViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BaseViewStmt.java index 545d7c1c57a4dd..e265703128dff7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BaseViewStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BaseViewStmt.java @@ -68,11 +68,22 @@ public String getTable() { return tableName.getTbl(); } + public TableName getTableName() { + return tableName; + } public List getColumns() { return finalCols; } + public List getColWithComments() { + return cols; + } + + public QueryStmt getViewDefStmt() { + return viewDefStmt; + } + public String getInlineViewDef() { return inlineViewDef; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java index 33474f8263ccbb..9bd34e1410be7f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java @@ -354,6 +354,10 @@ public boolean isVisible() { return visible; } + public int getClusterKeyId() { + return this.clusterKeyId; + } + public void setClusterKeyId(int clusterKeyId) { this.clusterKeyId = clusterKeyId; } @@ -423,8 +427,10 @@ public void analyze(boolean isOlap) throws AnalysisException { } if (type.getPrimitiveType() == PrimitiveType.BITMAP) { - if (defaultValue.isSet && defaultValue != DefaultValue.NULL_DEFAULT_VALUE) { - throw new AnalysisException("Bitmap type column can not set default value"); + if (defaultValue.isSet && defaultValue != DefaultValue.NULL_DEFAULT_VALUE + && !defaultValue.value.equals(DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE.value)) { + throw new AnalysisException("Bitmap type column default value only support null or " + + DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE.value); } defaultValue = DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java index 865489a113e810..d3f37b632ca0a5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java @@ -421,9 +421,6 @@ public void analyze(Analyzer analyzer) throws UserException { keysDesc.analyze(columnDefs); if (!CollectionUtils.isEmpty(keysDesc.getClusterKeysColumnNames())) { - if (Config.isCloudMode()) { - throw new AnalysisException("Cluster key is not supported in cloud mode"); - } if (!enableUniqueKeyMergeOnWrite) { throw new AnalysisException("Cluster keys only support unique keys table which enabled " + PropertyAnalyzer.ENABLE_UNIQUE_KEY_MERGE_ON_WRITE); @@ -503,7 +500,7 @@ public void analyze(Analyzer analyzer) throws UserException { columnDef.getType().getPrimitiveType() + " column can't support aggregation " + columnDef.getAggregateType()); } - if (columnDef.isKey()) { + if (columnDef.isKey() || columnDef.getClusterKeyId() != -1) { throw new AnalysisException(columnDef.getType().getPrimitiveType() + " can only be used in the non-key column of the duplicate table at present."); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateViewStmt.java index 08f830a7fde31b..682c553943995d 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateViewStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateViewStmt.java @@ -25,6 +25,7 @@ import org.apache.doris.common.UserException; import org.apache.doris.common.util.Util; import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.qe.ConnectContext; import com.google.common.base.Strings; @@ -38,12 +39,14 @@ public class CreateViewStmt extends BaseViewStmt implements NotFallbackInParser private static final Logger LOG = LogManager.getLogger(CreateViewStmt.class); private final boolean ifNotExists; + private final boolean orReplace; private final String comment; - public CreateViewStmt(boolean ifNotExists, TableName tableName, List cols, + public CreateViewStmt(boolean ifNotExists, boolean orReplace, TableName tableName, List cols, String comment, QueryStmt queryStmt) { super(tableName, cols, queryStmt); this.ifNotExists = ifNotExists; + this.orReplace = orReplace; this.comment = Strings.nullToEmpty(comment); } @@ -51,6 +54,10 @@ public boolean isSetIfNotExists() { return ifNotExists; } + public boolean isSetOrReplace() { + return orReplace; + } + public String getComment() { return comment; } @@ -64,6 +71,10 @@ public void analyze(Analyzer analyzer) throws UserException { // disallow external catalog Util.prohibitExternalCatalog(tableName.getCtl(), this.getClass().getSimpleName()); + if (orReplace && ifNotExists) { + throw new AnalysisException("[OR REPLACE] and [IF NOT EXISTS] cannot be used at the same time"); + } + // check privilege if (!Env.getCurrentEnv().getAccessManager() .checkTblPriv(ConnectContext.get(), tableName.getCtl(), tableName.getDb(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java index d06c661f9da9fc..369f4594dcab5e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java @@ -648,6 +648,7 @@ private String paramsToSql() { || fnName.getFunction().equalsIgnoreCase("sm4_decrypt") || fnName.getFunction().equalsIgnoreCase("sm4_encrypt"))) { sb.append("\'***\'"); + continue; } else if (orderByElements.size() > 0 && i == len - orderByElements.size()) { sb.append("ORDER BY "); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/KeysDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/KeysDesc.java index e7359657ef2e8a..563533ae323a89 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/KeysDesc.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/KeysDesc.java @@ -19,7 +19,6 @@ import org.apache.doris.catalog.KeysType; import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; @@ -34,7 +33,6 @@ public class KeysDesc implements Writable { private KeysType type; private List keysColumnNames; private List clusterKeysColumnNames; - private List clusterKeysColumnIds = null; public KeysDesc() { this.type = KeysType.AGG_KEYS; @@ -51,12 +49,6 @@ public KeysDesc(KeysType type, List keysColumnNames, List cluste this.clusterKeysColumnNames = clusterKeyColumnNames; } - public KeysDesc(KeysType type, List keysColumnNames, List clusterKeyColumnNames, - List clusterKeysColumnIds) { - this(type, keysColumnNames, clusterKeyColumnNames); - 
this.clusterKeysColumnIds = clusterKeysColumnIds; - } - public KeysType getKeysType() { return type; } @@ -69,10 +61,6 @@ public List getClusterKeysColumnNames() { return clusterKeysColumnNames; } - public List getClusterKeysColumnIds() { - return clusterKeysColumnIds; - } - public boolean containsCol(String colName) { return keysColumnNames.contains(colName); } @@ -90,17 +78,6 @@ public void analyze(List cols) throws AnalysisException { throw new AnalysisException("The number of key columns should be less than the number of columns."); } - if (clusterKeysColumnNames != null) { - if (Config.isCloudMode()) { - throw new AnalysisException("Cluster key is not supported in cloud mode"); - } - if (type != KeysType.UNIQUE_KEYS) { - throw new AnalysisException("Cluster keys only support unique keys table."); - } - clusterKeysColumnIds = Lists.newArrayList(); - analyzeClusterKeys(cols); - } - for (int i = 0; i < keysColumnNames.size(); ++i) { String name = cols.get(i).getName(); if (!keysColumnNames.get(i).equalsIgnoreCase(name)) { @@ -135,39 +112,45 @@ public void analyze(List cols) throws AnalysisException { } if (clusterKeysColumnNames != null) { - int minKeySize = keysColumnNames.size() < clusterKeysColumnNames.size() ? keysColumnNames.size() - : clusterKeysColumnNames.size(); - boolean sameKey = true; - for (int i = 0; i < minKeySize; ++i) { - if (!keysColumnNames.get(i).equalsIgnoreCase(clusterKeysColumnNames.get(i))) { - sameKey = false; - break; - } - } - if (sameKey) { - throw new AnalysisException("Unique keys and cluster keys should be different."); - } + analyzeClusterKeys(cols); } } private void analyzeClusterKeys(List cols) throws AnalysisException { - for (int i = 0; i < clusterKeysColumnNames.size(); ++i) { + if (type != KeysType.UNIQUE_KEYS) { + throw new AnalysisException("Cluster keys only support unique keys table"); + } + // check that cluster keys is not duplicated + for (int i = 0; i < clusterKeysColumnNames.size(); i++) { String name = clusterKeysColumnNames.get(i); - // check if key is duplicate for (int j = 0; j < i; j++) { if (clusterKeysColumnNames.get(j).equalsIgnoreCase(name)) { throw new AnalysisException("Duplicate cluster key column[" + name + "]."); } } - // check if key exists and generate key column ids + } + // check that cluster keys is not equal to primary keys + int minKeySize = Math.min(keysColumnNames.size(), clusterKeysColumnNames.size()); + boolean sameKey = true; + for (int i = 0; i < minKeySize; i++) { + if (!keysColumnNames.get(i).equalsIgnoreCase(clusterKeysColumnNames.get(i))) { + sameKey = false; + break; + } + } + if (sameKey) { + throw new AnalysisException("Unique keys and cluster keys should be different."); + } + // check that cluster key column exists + for (int i = 0; i < clusterKeysColumnNames.size(); i++) { + String name = clusterKeysColumnNames.get(i); for (int j = 0; j < cols.size(); j++) { if (cols.get(j).getName().equalsIgnoreCase(name)) { - cols.get(j).setClusterKeyId(clusterKeysColumnIds.size()); - clusterKeysColumnIds.add(j); + cols.get(j).setClusterKeyId(i); break; } if (j == cols.size() - 1) { - throw new AnalysisException("Key cluster column[" + name + "] doesn't exist."); + throw new AnalysisException("Cluster key column[" + name + "] doesn't exist."); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java index 132d2a86b22c23..4f3f720fae9726 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java @@ -1365,6 +1365,14 @@ private void trySetPartialUpdate() throws UserException { if (hasEmptyTargetColumns) { return; } + + boolean hasSyncMaterializedView = olapTable.getFullSchema().stream() + .anyMatch(col -> col.isMaterializedViewColumn()); + if (hasSyncMaterializedView) { + throw new UserException("Can't do partial update on merge-on-write Unique table" + + " with sync materialized view."); + } + boolean hasMissingColExceptAutoIncKey = false; for (Column col : olapTable.getFullSchema()) { boolean exists = false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetUserPropertyVar.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetUserPropertyVar.java index a2f31818b8b689..ccc189343c6463 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetUserPropertyVar.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetUserPropertyVar.java @@ -92,6 +92,13 @@ private void checkAccess(Analyzer analyzer, boolean isSelf) throws AnalysisExcep .getCloudClusterNames().contains(value)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_CLOUD_CLUSTER_ERROR, value); } + + if (key.equals(UserProperty.DEFAULT_COMPUTE_GROUP) + && !Strings.isNullOrEmpty(value) + && !((CloudSystemInfoService) Env.getCurrentSystemInfo()) + .getCloudClusterNames().contains(value)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_CLOUD_CLUSTER_ERROR, value); + } } return; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCacheHotSpotStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCacheHotSpotStmt.java index 847b015825dc1c..2c14a9f8cbbc62 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCacheHotSpotStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCacheHotSpotStmt.java @@ -43,8 +43,8 @@ public class ShowCacheHotSpotStmt extends ShowStmt implements NotFallbackInParser { public static final ShowResultSetMetaData[] RESULT_SET_META_DATAS = { ShowResultSetMetaData.builder() - .addColumn(new Column("cluster_id", ScalarType.createType(PrimitiveType.BIGINT))) - .addColumn(new Column("cluster_name", ScalarType.createVarchar(128))) + .addColumn(new Column("compute_group_id", ScalarType.createType(PrimitiveType.BIGINT))) + .addColumn(new Column("compute_group_name", ScalarType.createVarchar(128))) .addColumn(new Column("table_id", ScalarType.createType(PrimitiveType.BIGINT))) .addColumn(new Column("table_name", ScalarType.createVarchar(128))) .build(), @@ -129,7 +129,8 @@ private String generateQueryString() { + "sum(query_per_week) as query_per_week_total " + "FROM " + TABLE_NAME.toString() + " group by cluster_id, cluster_name, table_id, table_name, insert_day) "); - StringBuilder q2 = new StringBuilder("select cluster_id, cluster_name, " + StringBuilder q2 = new StringBuilder("select cluster_id as compute_group_id, " + + "cluster_name as compute_group_name, " + "table_id, table_name as hot_table_name from (select row_number() " + "over (partition by cluster_id order by insert_day desc, " + "query_per_day_total desc, query_per_week_total desc) as dr2, " diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCloudWarmUpStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCloudWarmUpStmt.java index e91e9b7d6fe251..f823aeb9c15636 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCloudWarmUpStmt.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowCloudWarmUpStmt.java @@ -34,9 +34,9 @@ public class ShowCloudWarmUpStmt extends ShowStmt implements NotFallbackInParser private boolean showAllJobs = false; private long jobId = -1; - private static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + private static final ImmutableList WARM_UP_JOB_TITLE_NAMES = new ImmutableList.Builder() .add("JobId") - .add("ClusterName") + .add("ComputeGroup") .add("Status") .add("Type") .add("CreateTime") @@ -116,7 +116,7 @@ public String toString() { @Override public ShowResultSetMetaData getMetaData() { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); - for (String title : ShowCloudWarmUpStmt.TITLE_NAMES) { + for (String title : ShowCloudWarmUpStmt.WARM_UP_JOB_TITLE_NAMES) { builder.addColumn(new Column(title, ScalarType.createVarchar(30))); } return builder.build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java index acb6d789f45e59..c29978267a3bcd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java @@ -34,10 +34,16 @@ import com.google.common.collect.ImmutableList; public class ShowClusterStmt extends ShowStmt implements NotFallbackInParser { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("cluster").add("is_current").add("users").build(); + public static final ImmutableList CLUSTER_TITLE_NAMES = new ImmutableList.Builder() + .add("cluster").add("is_current").add("users").add("backend_num").build(); - public ShowClusterStmt() { + public static final ImmutableList COMPUTE_GROUP_TITLE_NAMES = new ImmutableList.Builder() + .add("Name").add("IsCurrent").add("Users").add("BackendNum").build(); + + boolean isComputeGroup = true; + + public ShowClusterStmt(boolean isComputeGroup) { + this.isComputeGroup = isComputeGroup; } @Override @@ -45,7 +51,11 @@ public ShowResultSetMetaData getMetaData() { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); ImmutableList titleNames = null; - titleNames = TITLE_NAMES; + if (isComputeGroup) { + titleNames = COMPUTE_GROUP_TITLE_NAMES; + } else { + titleNames = CLUSTER_TITLE_NAMES; + } for (String title : titleNames) { builder.addColumn(new Column(title, ScalarType.createVarchar(128))); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java index 1f3f19a6d29202..3f78398aafb80f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java @@ -45,6 +45,7 @@ public class ShowRolesStmt extends ShowStmt implements NotFallbackInParser { builder.addColumn(new Column("CloudStagePrivs", ScalarType.createVarchar(300))); builder.addColumn(new Column("StorageVaultPrivs", ScalarType.createVarchar(300))); builder.addColumn(new Column("WorkloadGroupPrivs", ScalarType.createVarchar(300))); + builder.addColumn(new Column("ComputeGroupPrivs", ScalarType.createVarchar(300))); META_DATA = builder.build(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStorageVaultStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStorageVaultStmt.java index f6124c4d20184d..6a06a580cec21f 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStorageVaultStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStorageVaultStmt.java @@ -33,7 +33,7 @@ **/ public class ShowStorageVaultStmt extends ShowStmt implements NotFallbackInParser { - private final String stmt = "SHOW STORAGE VAULT"; + private final String stmt = "SHOW STORAGE VAULTS"; public ShowStorageVaultStmt() { } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java index 8db81c9ac10f34..7078c90f1ed5be 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SlotRef.java @@ -262,7 +262,6 @@ public String toSqlImpl() { && ConnectContext.get().getState().isNereids() && !ConnectContext.get().getState().isQuery() && ConnectContext.get().getSessionVariable() != null - && ConnectContext.get().getSessionVariable().isEnableNereidsPlanner() && desc != null) { return label + "[#" + desc.getId().asInt() + "]"; } else { diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index b499a9b525ea88..d71227049824dd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -63,6 +63,7 @@ import org.apache.doris.common.Pair; import org.apache.doris.common.io.Text; import org.apache.doris.common.util.DbUtil; +import org.apache.doris.common.util.DebugPointUtil; import org.apache.doris.common.util.DynamicPartitionUtil; import org.apache.doris.common.util.PropertyAnalyzer; import org.apache.doris.common.util.TimeUtils; @@ -99,6 +100,7 @@ import com.google.common.collect.Multimap; import com.google.common.collect.Table.Cell; import com.google.gson.annotations.SerializedName; +import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -438,6 +440,12 @@ public void run() { checkIfNeedCancel(); if (status.ok()) { + if (state != RestoreJobState.PENDING && label.equals( + DebugPointUtil.getDebugParamOrDefault("FE.PAUSE_NON_PENDING_RESTORE_JOB", ""))) { + LOG.info("pause restore job by debug point: {}", this); + return; + } + switch (state) { case PENDING: checkAndPrepareMeta(); @@ -578,6 +586,8 @@ private void checkAndPrepareMeta() { if (isAtomicRestore) { // We will create new OlapTable in atomic restore, so does not set the RESTORE state. + // Instead, set table in atomic restore state, to forbid the alter table operation. + olapTbl.setInAtomicRestore(); continue; } @@ -642,8 +652,8 @@ private void checkAndPrepareMeta() { } } - // the new tablets -> { local tablet, schema hash }, used in atomic restore. - Map> tabletBases = null; + // the new tablets -> { local tablet, schema hash, storage medium }, used in atomic restore. + Map tabletBases = new HashMap<>(); // Check and prepare meta objects. AgentBatchTask batchTask = new AgentBatchTask(); @@ -802,7 +812,7 @@ private void checkAndPrepareMeta() { if (isAtomicRestore && localTbl != null) { // bind the backends and base tablets from local tbl. 
- tabletBases = bindLocalAndRemoteOlapTableReplicas((OlapTable) localTbl, remoteOlapTbl); + status = bindLocalAndRemoteOlapTableReplicas((OlapTable) localTbl, remoteOlapTbl, tabletBases); if (!status.ok()) { return; } @@ -823,16 +833,23 @@ private void checkAndPrepareMeta() { if (localTbl != null) { Preconditions.checkState(localTbl.getType() == TableType.VIEW); View localView = (View) localTbl; - if (!localView.getSignature(BackupHandler.SIGNATURE_VERSION) - .equals(remoteView.getSignature(BackupHandler.SIGNATURE_VERSION))) { - status = new Status(ErrCode.COMMON_ERROR, "View " - + jobInfo.getAliasByOriginNameIfSet(backupViewName) - + " already exist but with different schema"); - return; + String localViewSignature = localView.getSignature(BackupHandler.SIGNATURE_VERSION); + // keep compatible with old version, compare the signature without reset view def + if (!localViewSignature.equals(remoteView.getSignature(BackupHandler.SIGNATURE_VERSION))) { + // reset view def to dest db name and compare signature again + String srcDbName = jobInfo.dbName; + remoteView.resetViewDefForRestore(srcDbName, db.getName()); + if (!localViewSignature.equals(remoteView.getSignature(BackupHandler.SIGNATURE_VERSION))) { + status = new Status(ErrCode.COMMON_ERROR, "View " + + jobInfo.getAliasByOriginNameIfSet(backupViewName) + + " already exist but with different schema"); + return; + } } } else { String srcDbName = jobInfo.dbName; - remoteView.resetIdsForRestore(env, srcDbName, db.getFullName()); + remoteView.resetViewDefForRestore(srcDbName, db.getName()); + remoteView.resetIdsForRestore(env); restoredTbls.add(remoteView); } } @@ -886,7 +903,7 @@ private void checkAndPrepareMeta() { if (restoreTbl.getType() == TableType.OLAP) { OlapTable restoreOlapTable = (OlapTable) restoreTbl; for (Partition restorePart : restoreOlapTable.getPartitions()) { - createReplicas(db, batchTask, restoreOlapTable, restorePart); + createReplicas(db, batchTask, restoreOlapTable, restorePart, tabletBases); BackupOlapTableInfo backupOlapTableInfo = jobInfo.getOlapTableInfo(restoreOlapTable.getName()); genFileMapping(restoreOlapTable, restorePart, backupOlapTableInfo.id, backupOlapTableInfo.getPartInfo(restorePart.getName()), @@ -899,7 +916,7 @@ private void checkAndPrepareMeta() { if (Env.isStoredTableNamesLowerCase()) { tableName = tableName.toLowerCase(); } - if (isAtomicRestore) { + if (restoreTbl.getType() == TableType.OLAP && isAtomicRestore) { tableName = tableAliasWithAtomicRestore(tableName); } restoreTbl.setName(tableName); @@ -1025,17 +1042,32 @@ private void checkAndPrepareMeta() { // No log here, PENDING state restore job will redo this method } - private Map> bindLocalAndRemoteOlapTableReplicas( - OlapTable localOlapTbl, OlapTable remoteOlapTbl) { - Map> tabletBases = new HashMap<>(); - + private Status bindLocalAndRemoteOlapTableReplicas( + OlapTable localOlapTbl, OlapTable remoteOlapTbl, + Map tabletBases) { localOlapTbl.readLock(); try { + // The storage medium of the remote olap table's storage is HDD, because we want to + // restore the tables in another cluster might without SSD. + // + // Keep the storage medium of the new olap table the same as the old one, so that + // the replicas in the new olap table will not be migrated to other storage mediums. 
+ remoteOlapTbl.setStorageMedium(localOlapTbl.getStorageMedium()); for (Partition partition : remoteOlapTbl.getPartitions()) { Partition localPartition = localOlapTbl.getPartition(partition.getName()); if (localPartition == null) { continue; } + // Since the replicas are bound to the same disk, the storage medium must be the same + // to avoid media migration. + TStorageMedium storageMedium = localOlapTbl.getPartitionInfo() + .getDataProperty(localPartition.getId()).getStorageMedium(); + remoteOlapTbl.getPartitionInfo().getDataProperty(partition.getId()) + .setStorageMedium(storageMedium); + if (LOG.isDebugEnabled()) { + LOG.debug("bind local partition {} and remote partition {} with same storage medium {}, name: {}", + localPartition.getId(), partition.getId(), storageMedium, partition.getName()); + } for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { String indexName = remoteOlapTbl.getIndexNameById(index.getId()); Long localIndexId = localOlapTbl.getIndexIdByName(indexName); @@ -1045,22 +1077,20 @@ private Map> bindLocalAndRemoteOlapTableReplicas( } int schemaHash = localOlapTbl.getSchemaHashByIndexId(localIndexId); if (schemaHash == -1) { - status = new Status(ErrCode.COMMON_ERROR, String.format( + return new Status(ErrCode.COMMON_ERROR, String.format( "schema hash of local index %d is not found, remote table=%d, remote index=%d, " + "local table=%d, local index=%d", localIndexId, remoteOlapTbl.getId(), index.getId(), localOlapTbl.getId(), localIndexId)); - return null; } List localTablets = localIndex.getTablets(); List remoteTablets = index.getTablets(); if (localTablets.size() != remoteTablets.size()) { - status = new Status(ErrCode.COMMON_ERROR, String.format( + return new Status(ErrCode.COMMON_ERROR, String.format( "the size of local tablet %s is not equals to the remote %s, " + "is_atomic_restore=true, remote table=%d, remote index=%d, " + "local table=%d, local index=%d", localTablets.size(), remoteTablets.size(), remoteOlapTbl.getId(), index.getId(), localOlapTbl.getId(), localIndexId)); - return null; } for (int i = 0; i < remoteTablets.size(); i++) { Tablet localTablet = localTablets.get(i); @@ -1068,14 +1098,13 @@ private Map> bindLocalAndRemoteOlapTableReplicas( List localReplicas = localTablet.getReplicas(); List remoteReplicas = remoteTablet.getReplicas(); if (localReplicas.size() != remoteReplicas.size()) { - status = new Status(ErrCode.COMMON_ERROR, String.format( + return new Status(ErrCode.COMMON_ERROR, String.format( "the size of local replicas %s is not equals to the remote %s, " + "is_atomic_restore=true, remote table=%d, remote index=%d, " + "local table=%d, local index=%d, local replicas=%d, remote replicas=%d", localTablets.size(), remoteTablets.size(), remoteOlapTbl.getId(), index.getId(), localOlapTbl.getId(), localIndexId, localReplicas.size(), remoteReplicas.size())); - return null; } for (int j = 0; j < remoteReplicas.size(); j++) { long backendId = localReplicas.get(j).getBackendId(); @@ -1086,14 +1115,15 @@ private Map> bindLocalAndRemoteOlapTableReplicas( localOlapTbl.getName()); } } - tabletBases.put(remoteTablet.getId(), Pair.of(localTablet.getId(), schemaHash)); + tabletBases.put(remoteTablet.getId(), + new TabletRef(localTablet.getId(), schemaHash, storageMedium)); } } } } finally { localOlapTbl.readUnlock(); } - return tabletBases; + return Status.OK; } private void prepareAndSendSnapshotTaskForOlapTable(Database db) { @@ -1217,7 +1247,7 @@ private void createReplicas(Database db, AgentBatchTask batchTask, 
OlapTable loc } private void createReplicas(Database db, AgentBatchTask batchTask, OlapTable localTbl, Partition restorePart, - Map> tabletBases) { + Map tabletBases) { Set bfColumns = localTbl.getCopiedBfColumns(); double bfFpp = localTbl.getBfFpp(); @@ -1234,9 +1264,17 @@ private void createReplicas(Database db, AgentBatchTask batchTask, OlapTable loc MaterializedIndexMeta indexMeta = localTbl.getIndexMetaByIndexId(restoredIdx.getId()); List indexes = restoredIdx.getId() == localTbl.getBaseIndexId() ? localTbl.getCopiedIndexes() : null; + List clusterKeyIndexes = null; + if (indexMeta.getIndexId() == localTbl.getBaseIndexId() || localTbl.isShadowIndex(indexMeta.getIndexId())) { + clusterKeyIndexes = OlapTable.getClusterKeyIndexes(indexMeta.getSchema()); + } for (Tablet restoreTablet : restoredIdx.getTablets()) { + TabletRef baseTabletRef = tabletBases == null ? null : tabletBases.get(restoreTablet.getId()); + // All restored replicas will be saved to HDD by default. + TStorageMedium storageMedium = baseTabletRef == null + ? TStorageMedium.HDD : baseTabletRef.storageMedium; TabletMeta tabletMeta = new TabletMeta(db.getId(), localTbl.getId(), restorePart.getId(), - restoredIdx.getId(), indexMeta.getSchemaHash(), TStorageMedium.HDD); + restoredIdx.getId(), indexMeta.getSchemaHash(), storageMedium); Env.getCurrentInvertedIndex().addTablet(restoreTablet.getId(), tabletMeta); for (Replica restoreReplica : restoreTablet.getReplicas()) { Env.getCurrentInvertedIndex().addReplica(restoreTablet.getId(), restoreReplica); @@ -1245,7 +1283,7 @@ private void createReplicas(Database db, AgentBatchTask batchTask, OlapTable loc restoreTablet.getId(), restoreReplica.getId(), indexMeta.getShortKeyColumnCount(), indexMeta.getSchemaHash(), restoreReplica.getVersion(), indexMeta.getKeysType(), TStorageType.COLUMN, - TStorageMedium.HDD /* all restored replicas will be saved to HDD */, + storageMedium, indexMeta.getSchema(), bfColumns, bfFpp, null, indexes, localTbl.isInMemory(), @@ -1270,10 +1308,17 @@ private void createReplicas(Database db, AgentBatchTask batchTask, OlapTable loc localTbl.variantEnableFlattenNested()); task.setInvertedIndexFileStorageFormat(localTbl.getInvertedIndexFileStorageFormat()); task.setInRestoreMode(true); - if (tabletBases != null && tabletBases.containsKey(restoreTablet.getId())) { + if (baseTabletRef != null) { // ensure this replica is bound to the same backend disk as the origin table's replica. 
- Pair baseTablet = tabletBases.get(restoreTablet.getId()); - task.setBaseTablet(baseTablet.first, baseTablet.second); + task.setBaseTablet(baseTabletRef.tabletId, baseTabletRef.schemaHash); + LOG.info("set base tablet {} for replica {} in restore job {}, tablet id={}", + baseTabletRef.tabletId, restoreReplica.getId(), jobId, restoreTablet.getId()); + } + if (!CollectionUtils.isEmpty(clusterKeyIndexes)) { + task.setClusterKeyIndexes(clusterKeyIndexes); + LOG.info("table: {}, partition: {}, index: {}, tablet: {}, cluster key indexes: {}", + localTbl.getId(), restorePart.getId(), restoredIdx.getId(), restoreTablet.getId(), + clusterKeyIndexes); } batchTask.addTask(task); } @@ -1361,7 +1406,7 @@ private void genFileMapping(OlapTable localTbl, Partition localPartition, Long r } private void genFileMapping(OlapTable localTbl, Partition localPartition, Long remoteTblId, - BackupPartitionInfo backupPartInfo, boolean overwrite, Map> tabletBases) { + BackupPartitionInfo backupPartInfo, boolean overwrite, Map tabletBases) { for (MaterializedIndex localIdx : localPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { if (LOG.isDebugEnabled()) { LOG.debug("get index id: {}, index name: {}", localIdx.getId(), @@ -1378,7 +1423,11 @@ private void genFileMapping(OlapTable localTbl, Partition localPartition, Long r for (Replica localReplica : localTablet.getReplicas()) { long refTabletId = -1L; if (tabletBases != null && tabletBases.containsKey(localTablet.getId())) { - refTabletId = tabletBases.get(localTablet.getId()).first; + refTabletId = tabletBases.get(localTablet.getId()).tabletId; + if (LOG.isDebugEnabled()) { + LOG.debug("restored tablet {} is based on exists tablet {}", + localTablet.getId(), refTabletId); + } } long noReplicaId = -1L; @@ -1426,11 +1475,6 @@ private void replayCheckAndPrepareMeta() { // replay set all existing tables's state to RESTORE for (String tableName : jobInfo.backupOlapTableObjects.keySet()) { - if (isAtomicRestore) { - // Atomic restore will creates new replica of the OlapTable. - continue; - } - Table tbl = db.getTableNullable(jobInfo.getAliasByOriginNameIfSet(tableName)); if (tbl == null) { continue; @@ -1438,6 +1482,12 @@ private void replayCheckAndPrepareMeta() { OlapTable olapTbl = (OlapTable) tbl; tbl.writeLock(); try { + if (isAtomicRestore) { + // Atomic restore will creates new replica of the OlapTable. + olapTbl.setInAtomicRestore(); + continue; + } + olapTbl.setState(OlapTableState.RESTORE); // set restore status for partitions BackupOlapTableInfo tblInfo = jobInfo.backupOlapTableObjects.get(tableName); @@ -2034,22 +2084,28 @@ private Status allTabletCommitted(boolean isReplay) { } private Status dropAllNonRestoredTableAndPartitions(Database db) { + Set restoredViews = jobInfo.newBackupObjects.views.stream() + .map(view -> view.name).collect(Collectors.toSet()); + try { for (Table table : db.getTables()) { long tableId = table.getId(); String tableName = table.getName(); TableType tableType = table.getType(); - BackupOlapTableInfo backupTableInfo = jobInfo.backupOlapTableObjects.get(tableName); - if (tableType != TableType.OLAP && tableType != TableType.ODBC && tableType != TableType.VIEW) { - continue; - } - if (tableType == TableType.OLAP && backupTableInfo != null) { - // drop the non restored partitions. - dropNonRestoredPartitions(db, (OlapTable) table, backupTableInfo); - } else if (isCleanTables) { - // otherwise drop the entire table. - LOG.info("drop non restored table {}({}). 
{}", tableName, tableId, this); - boolean isForceDrop = false; // move this table into recyclebin. + if (tableType == TableType.OLAP) { + BackupOlapTableInfo backupTableInfo = jobInfo.backupOlapTableObjects.get(tableName); + if (tableType == TableType.OLAP && backupTableInfo != null) { + // drop the non restored partitions. + dropNonRestoredPartitions(db, (OlapTable) table, backupTableInfo); + } else if (isCleanTables) { + // otherwise drop the entire table. + LOG.info("drop non restored table {}, table id: {}. {}", tableName, tableId, this); + boolean isForceDrop = false; // move this table into recyclebin. + env.getInternalCatalog().dropTableWithoutCheck(db, table, isForceDrop); + } + } else if (tableType == TableType.VIEW && isCleanTables && !restoredViews.contains(tableName)) { + LOG.info("drop non restored view {}, table id: {}. {}", tableName, tableId, this); + boolean isForceDrop = false; // move this view into recyclebin. env.getInternalCatalog().dropTableWithoutCheck(db, table, isForceDrop); } } @@ -2208,6 +2264,12 @@ public void cancelInternal(boolean isReplay) { // remove restored tbls for (Table restoreTbl : restoredTbls) { + if (isAtomicRestore && restoreTbl.getType() == TableType.OLAP + && !restoreTbl.getName().startsWith(ATOMIC_RESTORE_TABLE_PREFIX)) { + // In atomic restore, a table registered to db must have a name with the prefix, + // otherwise, it has not been registered and can be ignored here. + continue; + } LOG.info("remove restored table when cancelled: {}", restoreTbl.getName()); if (db.writeLockIfExist()) { try { @@ -2386,6 +2448,10 @@ private void setTableStateToNormalAndUpdateProperties(Database db, boolean commi LOG.info("table {} set state from {} to normal", tableName, olapTbl.getState()); olapTbl.setState(OlapTableState.NORMAL); } + if (olapTbl.isInAtomicRestore()) { + olapTbl.clearInAtomicRestore(); + LOG.info("table {} set state from atomic restore to normal", tableName); + } BackupOlapTableInfo tblInfo = jobInfo.backupOlapTableObjects.get(tableName); for (Map.Entry partitionEntry : tblInfo.partitions.entrySet()) { @@ -2529,7 +2595,19 @@ public String toString() { return sb.toString(); } - private String tableAliasWithAtomicRestore(String tableName) { + public static String tableAliasWithAtomicRestore(String tableName) { return ATOMIC_RESTORE_TABLE_PREFIX + tableName; } + + private static class TabletRef { + public long tabletId; + public int schemaHash; + public TStorageMedium storageMedium; + + TabletRef(long tabletId, int schemaHash, TStorageMedium storageMedium) { + this.tabletId = tabletId; + this.schemaHash = schemaHash; + this.storageMedium = storageMedium; + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java index 6fc2e3f813fb07..3daad2a0ed9b58 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/binlog/BinlogManager.java @@ -33,6 +33,7 @@ import org.apache.doris.persist.ReplacePartitionOperationLog; import org.apache.doris.persist.TableAddOrDropColumnsInfo; import org.apache.doris.persist.TableInfo; +import org.apache.doris.persist.TableRenameColumnInfo; import org.apache.doris.persist.TruncateTableInfo; import org.apache.doris.thrift.TBinlog; import org.apache.doris.thrift.TBinlogType; @@ -330,6 +331,17 @@ public void addTableRename(TableInfo info, long commitSeq) { addBinlog(dbId, tableIds, commitSeq, timestamp, type, data, false, info); } + public void 
addColumnRename(TableRenameColumnInfo info, long commitSeq) { + long dbId = info.getDbId(); + List tableIds = Lists.newArrayList(); + tableIds.add(info.getTableId()); + long timestamp = -1; + TBinlogType type = TBinlogType.RENAME_COLUMN; + String data = info.toJson(); + + addBinlog(dbId, tableIds, commitSeq, timestamp, type, data, false, info); + } + // get binlog by dbId, return first binlog.version > version public Pair getBinlog(long dbId, long tableId, long prevCommitSeq) { TStatus status = new TStatus(TStatusCode.OK); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java index bd0b9fe4e877bf..3874f38145a66e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java @@ -183,6 +183,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.Fpow; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromBase64; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromDays; +import org.apache.doris.nereids.trees.expressions.functions.scalar.FromIso8601Date; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromMicrosecond; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromMillisecond; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromSecond; @@ -238,6 +239,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonObject; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonQuote; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonReplace; +import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonSearch; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonSet; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonUnQuote; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonbExistsPath; @@ -265,6 +267,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.L1Distance; import org.apache.doris.nereids.trees.expressions.functions.scalar.L2Distance; import org.apache.doris.nereids.trees.expressions.functions.scalar.LastDay; +import org.apache.doris.nereids.trees.expressions.functions.scalar.LastQueryId; import org.apache.doris.nereids.trees.expressions.functions.scalar.Least; import org.apache.doris.nereids.trees.expressions.functions.scalar.Left; import org.apache.doris.nereids.trees.expressions.functions.scalar.Length; @@ -318,6 +321,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.Negative; import org.apache.doris.nereids.trees.expressions.functions.scalar.NgramSearch; import org.apache.doris.nereids.trees.expressions.functions.scalar.NonNullable; +import org.apache.doris.nereids.trees.expressions.functions.scalar.NormalCdf; import org.apache.doris.nereids.trees.expressions.functions.scalar.NotNullOrEmpty; import org.apache.doris.nereids.trees.expressions.functions.scalar.Now; import org.apache.doris.nereids.trees.expressions.functions.scalar.NullIf; @@ -428,15 +432,18 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIpv6; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIpv6OrDefault; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIpv6OrNull; +import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIso8601; import 
org.apache.doris.nereids.trees.expressions.functions.scalar.ToMonday; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToQuantileState; import org.apache.doris.nereids.trees.expressions.functions.scalar.Tokenize; +import org.apache.doris.nereids.trees.expressions.functions.scalar.Translate; import org.apache.doris.nereids.trees.expressions.functions.scalar.Trim; import org.apache.doris.nereids.trees.expressions.functions.scalar.Truncate; import org.apache.doris.nereids.trees.expressions.functions.scalar.Unhex; import org.apache.doris.nereids.trees.expressions.functions.scalar.UnixTimestamp; import org.apache.doris.nereids.trees.expressions.functions.scalar.Upper; import org.apache.doris.nereids.trees.expressions.functions.scalar.UrlDecode; +import org.apache.doris.nereids.trees.expressions.functions.scalar.UrlEncode; import org.apache.doris.nereids.trees.expressions.functions.scalar.User; import org.apache.doris.nereids.trees.expressions.functions.scalar.UtcTimestamp; import org.apache.doris.nereids.trees.expressions.functions.scalar.Uuid; @@ -638,6 +645,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(Fpow.class, "fpow"), scalar(FromBase64.class, "from_base64"), scalar(FromDays.class, "from_days"), + scalar(FromIso8601Date.class, "from_iso8601_date"), scalar(FromUnixtime.class, "from_unixtime"), scalar(G.class, "g"), scalar(GetJsonBigInt.class, "get_json_bigint"), @@ -728,6 +736,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(JsonbParseNullableErrorToNull.class, "jsonb_parse_nullable_error_to_null"), scalar(JsonbParseNullableErrorToValue.class, "json_parse_nullable_error_to_value"), scalar(JsonbParseNullableErrorToValue.class, "jsonb_parse_nullable_error_to_value"), + scalar(JsonSearch.class, "json_search"), scalar(JsonbValid.class, "json_valid"), scalar(JsonbValid.class, "jsonb_valid"), scalar(JsonbType.class, "json_type"), @@ -789,6 +798,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(MurmurHash364.class, "murmur_hash3_64"), scalar(Negative.class, "negative"), scalar(NonNullable.class, "non_nullable"), + scalar(NormalCdf.class, "normal_cdf"), scalar(NotNullOrEmpty.class, "not_null_or_empty"), scalar(NgramSearch.class, "ngram_search"), scalar(Now.class, "now", "current_timestamp", "localtime", "localtimestamp"), @@ -904,9 +914,11 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(ToIpv6.class, "to_ipv6"), scalar(ToIpv6OrDefault.class, "to_ipv6_or_default"), scalar(ToIpv6OrNull.class, "to_ipv6_or_null"), + scalar(ToIso8601.class, "to_iso8601"), scalar(Tokenize.class, "tokenize"), scalar(ToMonday.class, "to_monday"), scalar(ToQuantileState.class, "to_quantile_state"), + scalar(Translate.class, "translate"), scalar(Trim.class, "trim"), scalar(Truncate.class, "truncate"), scalar(Unhex.class, "unhex"), @@ -914,6 +926,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(Upper.class, "ucase", "upper"), scalar(Quote.class, "quote"), scalar(UrlDecode.class, "url_decode"), + scalar(UrlEncode.class, "url_encode"), scalar(User.class, "user"), scalar(UtcTimestamp.class, "utc_timestamp"), scalar(Uuid.class, "uuid"), @@ -940,7 +953,8 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(YearsDiff.class, "years_diff"), scalar(YearsSub.class, "years_sub"), scalar(MultiMatch.class, "multi_match"), - scalar(SessionUser.class, "session_user")); + scalar(SessionUser.class, "session_user"), + scalar(LastQueryId.class, "last_query_id")); public 
static final BuiltinScalarFunctions INSTANCE = new BuiltinScalarFunctions(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java index fc69c31f0e98f3..7e0ab33aa7c14d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java @@ -1181,4 +1181,10 @@ public GeneratedColumnInfo getGeneratedColumnInfo() { public Set getGeneratedColumnsThatReferToThis() { return generatedColumnsThatReferToThis; } + + public void setDefaultValueInfo(Column refColumn) { + this.defaultValue = refColumn.defaultValue; + this.defaultValueExprDef = refColumn.defaultValueExprDef; + this.realDefaultValue = refColumn.realDefaultValue; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java index 3a68fa0b264c5d..4c4c3089babd98 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java @@ -79,6 +79,7 @@ import org.apache.doris.analysis.TruncateTableStmt; import org.apache.doris.analysis.UninstallPluginStmt; import org.apache.doris.backup.BackupHandler; +import org.apache.doris.backup.RestoreJob; import org.apache.doris.binlog.BinlogGcer; import org.apache.doris.binlog.BinlogManager; import org.apache.doris.blockrule.SqlBlockRuleMgr; @@ -289,6 +290,7 @@ import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TStorageMedium; import org.apache.doris.transaction.DbUsedDataQuotaInfoCollector; +import org.apache.doris.transaction.GlobalExternalTransactionInfoMgr; import org.apache.doris.transaction.GlobalTransactionMgrIface; import org.apache.doris.transaction.PublishVersionDaemon; @@ -567,6 +569,8 @@ public class Env { private final SplitSourceManager splitSourceManager; + private final GlobalExternalTransactionInfoMgr globalExternalTransactionInfoMgr; + private final List forceSkipJournalIds = Arrays.asList(Config.force_skip_journal_ids); // if a config is relative to a daemon thread. record the relation here. we will proactively change interval of it. @@ -815,6 +819,7 @@ public Env(boolean isCheckpointCatalog) { this.dnsCache = new DNSCache(); this.sqlCacheManager = new NereidsSqlCacheManager(); this.splitSourceManager = new SplitSourceManager(); + this.globalExternalTransactionInfoMgr = new GlobalExternalTransactionInfoMgr(); } public static void destroyCheckpoint() { @@ -1150,6 +1155,17 @@ protected boolean isStartFromEmpty() { return !roleFile.exists() && !versionFile.exists(); } + private void getClusterIdFromStorage(Storage storage) throws IOException { + clusterId = storage.getClusterID(); + if (Config.cluster_id != -1 && Config.cluster_id != this.clusterId) { + LOG.warn("Configured cluster_id {} does not match stored cluster_id {}. " + + "This may indicate a configuration error.", + Config.cluster_id, this.clusterId); + throw new IOException("Configured cluster_id does not match stored cluster_id. 
" + + "Please check your configuration."); + } + } + protected void getClusterIdAndRole() throws IOException { File roleFile = new File(this.imageDir, Storage.ROLE_FILE); File versionFile = new File(this.imageDir, Storage.VERSION_FILE); @@ -1231,7 +1247,7 @@ protected void getClusterIdAndRole() throws IOException { frontends.put(nodeName, self); LOG.info("add self frontend: {}", self); } else { - clusterId = storage.getClusterID(); + getClusterIdFromStorage(storage); if (storage.getToken() == null) { token = Strings.isNullOrEmpty(Config.auth_token) ? Storage.newToken() : Config.auth_token; LOG.info("refresh new token"); @@ -1286,7 +1302,7 @@ protected void getClusterIdAndRole() throws IOException { // NOTE: cluster_id will be init when Storage object is constructed, // so we new one. storage = new Storage(this.imageDir); - clusterId = storage.getClusterID(); + getClusterIdFromStorage(storage); token = storage.getToken(); if (Strings.isNullOrEmpty(token)) { token = Config.auth_token; @@ -1294,7 +1310,7 @@ protected void getClusterIdAndRole() throws IOException { } else { // If the version file exist, read the cluster id and check the // id with helper node to make sure they are identical - clusterId = storage.getClusterID(); + getClusterIdFromStorage(storage); token = storage.getToken(); try { String url = "http://" + NetUtils @@ -2060,7 +2076,7 @@ public boolean hasReplayer() { public void loadImage(String imageDir) throws IOException, DdlException { Storage storage = new Storage(imageDir); - clusterId = storage.getClusterID(); + getClusterIdFromStorage(storage); File curFile = storage.getCurrentImageFile(); if (!curFile.exists()) { // image.0 may not exist @@ -2995,6 +3011,11 @@ public void addFrontend(FrontendNodeType role, String host, int editLogPort) thr } public void addFrontend(FrontendNodeType role, String host, int editLogPort, String nodeName) throws DdlException { + addFrontend(role, host, editLogPort, nodeName, ""); + } + + public void addFrontend(FrontendNodeType role, String host, int editLogPort, String nodeName, String cloudUniqueId) + throws DdlException { if (!tryLock(false)) { throw new DdlException("Failed to acquire env lock. Try again"); } @@ -3025,6 +3046,7 @@ public void addFrontend(FrontendNodeType role, String host, int editLogPort, Str // Only add frontend after removing the conflict nodes, to ensure the exception safety. 
fe = new Frontend(role, nodeName, host, editLogPort); + fe.setCloudUniqueId(cloudUniqueId); frontends.put(nodeName, fe); LOG.info("add frontend: {}", fe); @@ -3075,7 +3097,6 @@ public void dropFrontend(FrontendNodeType role, String host, int port) throws Dd } public void dropFrontendFromBDBJE(FrontendNodeType role, String host, int port) throws DdlException { - if (port == selfNode.getPort() && feType == FrontendNodeType.MASTER && selfNode.getHost().equals(host)) { throw new DdlException("can not drop current master node."); @@ -3681,6 +3702,10 @@ private static void addOlapTablePropertyInfo(OlapTable olapTable, StringBuilder .append("\" = \""); sb.append(olapTable.isDuplicateWithoutKey()).append("\""); } + + if (olapTable.isInAtomicRestore()) { + sb.append(",\n\"").append(PropertyAnalyzer.PROPERTIES_IN_ATOMIC_RESTORE).append("\" = \"true\""); + } } /** @@ -4707,6 +4732,9 @@ public void renameTable(Database db, Table table, String newTableName) throws Dd if (db.getTable(newTableName).isPresent()) { throw new DdlException("Table name[" + newTableName + "] is already used"); } + if (db.getTable(RestoreJob.tableAliasWithAtomicRestore(newTableName)).isPresent()) { + throw new DdlException("Table name[" + newTableName + "] is already used (in restoring)"); + } if (table.isManagedTable()) { // olap table should also check if any rollup has same name as "newTableName" @@ -5131,11 +5159,7 @@ private void renameColumn(Database db, OlapTable table, String colName, indexIdToSchemaVersion); editLog.logColumnRename(info); LOG.info("rename coloumn[{}] to {}", colName, newColName); - try { - Env.getCurrentEnv().getAnalysisManager().dropStats(table, null); - } catch (Exception e) { - LOG.info("Failed to drop stats after rename column. Reason: {}", e.getMessage()); - } + Env.getCurrentEnv().getAnalysisManager().dropStats(table, null); } } @@ -5546,33 +5570,49 @@ public void createView(CreateViewStmt stmt) throws DdlException { Database db = getInternalCatalog().getDbOrDdlException(dbName); // check if table exists in db + boolean replace = false; if (db.getTable(tableName).isPresent()) { if (stmt.isSetIfNotExists()) { LOG.info("create view[{}] which already exists", tableName); return; + } else if (stmt.isSetOrReplace()) { + replace = true; + LOG.info("view[{}] already exists, need to replace it", tableName); } else { ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); } } - List columns = stmt.getColumns(); - - long tableId = Env.getCurrentEnv().getNextId(); - View newView = new View(tableId, tableName, columns); - newView.setComment(stmt.getComment()); - newView.setInlineViewDefWithSqlMode(stmt.getInlineViewDef(), - ConnectContext.get().getSessionVariable().getSqlMode()); - // init here in case the stmt string from view.toSql() has some syntax error. 
- try { - newView.init(); - } catch (UserException e) { - throw new DdlException("failed to init view stmt, reason=" + e.getMessage()); - } + if (replace) { + AlterViewStmt alterViewStmt = new AlterViewStmt(stmt.getTableName(), stmt.getColWithComments(), + stmt.getViewDefStmt()); + alterViewStmt.setInlineViewDef(stmt.getInlineViewDef()); + try { + alterView(alterViewStmt); + } catch (UserException e) { + throw new DdlException("failed to replace view[" + tableName + "], reason=" + e.getMessage()); + } + LOG.info("successfully replace view[{}]", tableName); + } else { + List columns = stmt.getColumns(); + + long tableId = Env.getCurrentEnv().getNextId(); + View newView = new View(tableId, tableName, columns); + newView.setComment(stmt.getComment()); + newView.setInlineViewDefWithSqlMode(stmt.getInlineViewDef(), + ConnectContext.get().getSessionVariable().getSqlMode()); + // init here in case the stmt string from view.toSql() has some syntax error. + try { + newView.init(); + } catch (UserException e) { + throw new DdlException("failed to init view stmt, reason=" + e.getMessage()); + } - if (!((Database) db).createTableWithLock(newView, false, stmt.isSetIfNotExists()).first) { - throw new DdlException("Failed to create view[" + tableName + "]."); + if (!((Database) db).createTableWithLock(newView, false, stmt.isSetIfNotExists()).first) { + throw new DdlException("Failed to create view[" + tableName + "]."); + } + LOG.info("successfully create view[" + tableName + "-" + newView.getId() + "]"); } - LOG.info("successfully create view[" + tableName + "-" + newView.getId() + "]"); } public FunctionRegistry getFunctionRegistry() { @@ -6555,6 +6595,10 @@ public SplitSourceManager getSplitSourceManager() { return splitSourceManager; } + public GlobalExternalTransactionInfoMgr getGlobalExternalTransactionInfoMgr() { + return globalExternalTransactionInfoMgr; + } + public StatisticsJobAppender getStatisticsJobAppender() { return statisticsJobAppender; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MTMV.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MTMV.java index f93ecc9475c643..bbfed9cf82ec5f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MTMV.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MTMV.java @@ -59,6 +59,7 @@ import java.util.Map.Entry; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -299,7 +300,13 @@ public MTMVCache getOrGenerateCache(ConnectContext connectionContext) throws Ana } // Concurrent situations may result in duplicate cache generation, // but we tolerate this in order to prevent nested use of readLock and write MvLock for the table - MTMVCache mtmvCache = MTMVCache.from(this, connectionContext, true); + MTMVCache mtmvCache; + try { + // Should new context with ADMIN user + mtmvCache = MTMVCache.from(this, MTMVPlanUtil.createMTMVContext(this), true); + } finally { + connectionContext.setThreadLocalInfo(); + } writeMvLock(); try { this.cache = mtmvCache; @@ -410,6 +417,10 @@ public Map> calculatePartitionMappings() throws AnalysisExce return res; } + public ConcurrentLinkedQueue getHistoryTasks() { + return jobInfo.getHistoryTasks(); + } + // for test public void setRefreshInfo(MTMVRefreshInfo refreshInfo) { this.refreshInfo = refreshInfo; diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java index 
2f5eb35ad757fb..07916231118863 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java @@ -110,6 +110,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; @@ -790,7 +791,7 @@ public Status resetIdsForRestore(Env env, Database db, ReplicaAllocation restore baseIndexId = newIdxId; } MaterializedIndexMeta indexMeta = origIdxIdToMeta.get(entry.getKey()); - indexMeta.resetIndexIdForRestore(newIdxId, srcDbName, db.getFullName()); + indexMeta.resetIndexIdForRestore(newIdxId, srcDbName, db.getName()); indexIdToMeta.put(newIdxId, indexMeta); indexNameToId.put(entry.getValue(), newIdxId); } @@ -1420,7 +1421,7 @@ public void setSequenceMapCol(String colName) { getOrCreatTableProperty().setSequenceMapCol(colName); } - public void setSequenceInfo(Type type) { + public void setSequenceInfo(Type type, Column refColumn) { this.hasSequenceCol = true; this.sequenceType = type; @@ -1434,6 +1435,9 @@ public void setSequenceInfo(Type type) { // unique key table sequenceCol = ColumnDef.newSequenceColumnDef(type, AggregateType.REPLACE).toColumn(); } + if (refColumn != null) { + sequenceCol.setDefaultValueInfo(refColumn); + } // add sequence column at last fullSchema.add(sequenceCol); nameToColumn.put(Column.SEQUENCE_COL, sequenceCol); @@ -1567,6 +1571,9 @@ public long fetchRowCount() { return getRowCountForIndex(baseIndexId, false); } + /** + * @return -1 if there are some tablets whose row count is not reported to FE + */ public long getRowCountForIndex(long indexId, boolean strict) { long rowCount = 0; for (Map.Entry entry : idToPartition.entrySet()) { @@ -1861,6 +1868,17 @@ public void gsonPostProcess() throws IOException { if (isAutoBucket()) { defaultDistributionInfo.markAutoBucket(); } + if (isUniqKeyMergeOnWrite() && getSequenceMapCol() != null) { + // set the hidden sequence column's default value the same with + // the sequence map column's for partial update + String seqMapColName = getSequenceMapCol(); + Column seqMapCol = getBaseSchema().stream().filter(col -> col.getName().equalsIgnoreCase(seqMapColName)) + .findFirst().orElse(null); + Column hiddenSeqCol = getSequenceCol(); + if (seqMapCol != null && hiddenSeqCol != null) { + hiddenSeqCol.setDefaultValueInfo(seqMapCol); + } + } RangePartitionInfo tempRangeInfo = tempPartitions.getPartitionInfo(); if (tempRangeInfo != null) { for (long partitionId : tempRangeInfo.getIdToItem(false).keySet()) { @@ -1988,6 +2006,10 @@ public void checkNormalStateForAlter() throws DdlException { throw new DdlException("Table[" + name + "]'s state(" + state.toString() + ") is not NORMAL. Do not allow doing ALTER ops"); } + if (tableProperty != null && tableProperty.isInAtomicRestore()) { + throw new DdlException("Table[" + name + "] is in atomic restore state. 
" + + "Do not allow doing ALTER ops"); + } } public boolean isStable(SystemInfoService infoService, TabletScheduler tabletScheduler) { @@ -2250,6 +2272,21 @@ public boolean containsPartition(String partitionName) { return nameToPartition.containsKey(partitionName); } + public void setInAtomicRestore() { + getOrCreatTableProperty().setInAtomicRestore().buildInAtomicRestore(); + } + + public void clearInAtomicRestore() { + getOrCreatTableProperty().clearInAtomicRestore().buildInAtomicRestore(); + } + + public boolean isInAtomicRestore() { + if (tableProperty != null) { + return tableProperty.isInAtomicRestore(); + } + return false; + } + public long getTTLSeconds() { if (tableProperty != null) { return tableProperty.getTTLSeconds(); @@ -3112,6 +3149,16 @@ private static List getVisibleVersionFromMeta(List dbIds, List } } + public static List getClusterKeyIndexes(List columns) { + Map clusterKeyIndexes = new TreeMap<>(); + for (Column column : columns) { + if (column.isClusterKey()) { + clusterKeyIndexes.put(column.getClusterKeyId(), column.getUniqueId()); + } + } + return clusterKeyIndexes.isEmpty() ? null : new ArrayList<>(clusterKeyIndexes.values()); + } + public long getVisibleVersionTime() { return tableAttributes.getVisibleVersionTime(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java index fe285b6919db1a..11867bcfb960a6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java @@ -189,8 +189,8 @@ protected void replaceIfEffectiveValue(Map properties, String ke public static final ShowResultSetMetaData STORAGE_VAULT_META_DATA = ShowResultSetMetaData.builder() - .addColumn(new Column("StorageVaultName", ScalarType.createVarchar(100))) - .addColumn(new Column("StorageVaultId", ScalarType.createVarchar(20))) + .addColumn(new Column("Name", ScalarType.createVarchar(100))) + .addColumn(new Column("Id", ScalarType.createVarchar(20))) .addColumn(new Column("Propeties", ScalarType.createVarchar(65535))) .addColumn(new Column("IsDefault", ScalarType.createVarchar(5))) .build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java index fdb60357f40ba0..1c1d7e3588065a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java @@ -68,6 +68,7 @@ public class TableProperty implements Writable, GsonPostProcessable { private boolean isInMemory = false; private short minLoadReplicaNum = -1; private long ttlSeconds = 0L; + private boolean isInAtomicRestore = false; private String storagePolicy = ""; private Boolean isBeingSynced = null; @@ -218,6 +219,26 @@ public TableProperty buildInMemory() { return this; } + public TableProperty buildInAtomicRestore() { + isInAtomicRestore = Boolean.parseBoolean(properties.getOrDefault( + PropertyAnalyzer.PROPERTIES_IN_ATOMIC_RESTORE, "false")); + return this; + } + + public boolean isInAtomicRestore() { + return isInAtomicRestore; + } + + public TableProperty setInAtomicRestore() { + properties.put(PropertyAnalyzer.PROPERTIES_IN_ATOMIC_RESTORE, "true"); + return this; + } + + public TableProperty clearInAtomicRestore() { + properties.remove(PropertyAnalyzer.PROPERTIES_IN_ATOMIC_RESTORE); + return this; + } + public TableProperty buildTTLSeconds() { ttlSeconds = 
Long.parseLong(properties.getOrDefault(PropertyAnalyzer.PROPERTIES_FILE_CACHE_TTL_SECONDS, "0")); return this; @@ -705,6 +726,7 @@ public void gsonPostProcess() throws IOException { buildTimeSeriesCompactionLevelThreshold(); buildTTLSeconds(); buildVariantEnableFlattenNested(); + buildInAtomicRestore(); if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_105) { // get replica num from property map and create replica allocation diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java index faf1fcab94f2df..18e1bfd65266c9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletStatMgr.java @@ -182,7 +182,7 @@ protected void runAfterCatalogReady() { index.setRowCount(indexRowCount); LOG.debug("Table {} index {} all tablets reported[{}], row count {}", olapTable.getName(), olapTable.getIndexNameById(index.getId()), - indexReported, tableRowCount); + indexReported, indexRowCount); } // end for indices } // end for partitions diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java index 707464aeaf7e1d..236a1f0fc28520 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/View.java @@ -258,9 +258,11 @@ public static View read(DataInput in) throws IOException { return GsonUtils.GSON.fromJson(Text.readString(in), View.class); } - public void resetIdsForRestore(Env env, String srcDbName, String dbName) { + public void resetIdsForRestore(Env env) { id = env.getNextId(); + } + public void resetViewDefForRestore(String srcDbName, String dbName) { // the source db name is not setted in old BackupMeta, keep compatible with the old one. 
if (srcDbName != null) { inlineViewDef = inlineViewDef.replaceAll(srcDbName, dbName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessControllerFactory.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessControllerFactory.java index 545e7a26836761..33e3f4a64c199a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessControllerFactory.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/authorizer/ranger/hive/RangerHiveAccessControllerFactory.java @@ -23,6 +23,12 @@ import java.util.Map; public class RangerHiveAccessControllerFactory implements AccessControllerFactory { + + @Override + public String factoryIdentifier() { + return "ranger-hive"; + } + @Override public CatalogAccessController createAccessController(Map prop) { return new RangerCacheHiveAccessController(prop); diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java index 4f359446aad30f..cbb64797612934 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManager.java @@ -331,7 +331,7 @@ private List> fetchOneClusterHotSpo return responseList; } - private Long getFileCacheUsedBytes(String clusterName) throws RuntimeException { + private Long getFileCacheCapacity(String clusterName) throws RuntimeException { List backends = ((CloudSystemInfoService) Env.getCurrentSystemInfo()) .getBackendsByClusterName(clusterName); Long totalFileCache = 0L; @@ -361,56 +361,6 @@ private Long getFileCacheUsedBytes(String clusterName) throws RuntimeException { return totalFileCache; } - private Map> warmUpNewClusterByTable(String dstClusterName, String dbName, String tableName, - String partitionName, boolean isForce) throws RuntimeException { - Database db = Env.getCurrentInternalCatalog().getDbNullable(dbName); - OlapTable table = (OlapTable) db.getTableNullable(tableName); - List partitions = new ArrayList<>(); - if (partitionName.length() != 0) { - partitions.add(table.getPartition(partitionName)); - } else { - partitions.addAll(table.getPartitions()); - } - List backends = ((CloudSystemInfoService) Env.getCurrentSystemInfo()) - .getBackendsByClusterName(dstClusterName); - Long totalFileCache = getFileCacheUsedBytes(dstClusterName); - Long warmUpTotalFileCache = 0L; - List warmUpPartitions = new ArrayList<>(); - for (Partition partition : partitions) { - warmUpTotalFileCache += partition.getDataSize(true); - warmUpPartitions.add(partition); - if (warmUpTotalFileCache > totalFileCache) { - if (!isForce) { - throw new RuntimeException("The cluster " + dstClusterName + "file cache size is not enough"); - } else { - break; - } - } - } - List indexes = new ArrayList<>(); - for (Partition partition : warmUpPartitions) { - indexes.addAll(partition.getMaterializedIndices(IndexExtState.VISIBLE)); - } - List tablets = new ArrayList<>(); - for (MaterializedIndex index : indexes) { - tablets.addAll(index.getTablets()); - } - Map> beToWarmUpTablets = new HashMap<>(); - for (Backend backend : backends) { - Set beTabletIds = ((CloudEnv) Env.getCurrentEnv()) - .getCloudTabletRebalancer() - .getSnapshotTabletsByBeId(backend.getId()); - List warmUpTablets = new ArrayList<>(); - for (Tablet tablet : tablets) { - if (beTabletIds.contains(tablet.getId())) { - warmUpTablets.add(tablet); - } - } - 
beToWarmUpTablets.put(backend.getId(), warmUpTablets); - } - return beToWarmUpTablets; - } - private Map>> splitBatch(Map> beToWarmUpTablets) { final Long maxSizePerBatch = 10737418240L; // 10G Map>> beToTabletIdBatches = new HashMap<>(); @@ -436,7 +386,7 @@ private Map>> splitBatch(Map> beToWarmU } private Map> warmUpNewClusterByCluster(String dstClusterName, String srcClusterName) { - Long dstTotalFileCache = getFileCacheUsedBytes(dstClusterName); + Long dstTotalFileCache = getFileCacheCapacity(dstClusterName); List> result = getClusterTopNHotPartitions(srcClusterName); Long warmUpTabletsSize = 0L; List tablets = new ArrayList<>(); @@ -571,7 +521,7 @@ private Map> warmUpNewClusterByTable(long jobId, String dstCl List> tables, boolean isForce) throws RuntimeException { Map> beToWarmUpTablets = new HashMap<>(); - Long totalFileCache = getFileCacheUsedBytes(dstClusterName); + Long totalFileCache = getFileCacheCapacity(dstClusterName); Long warmUpTotalFileCache = 0L; for (Triple tableTriple : tables) { if (warmUpTotalFileCache > totalFileCache) { @@ -592,11 +542,12 @@ private Map> warmUpNewClusterByTable(long jobId, String dstCl .getBackendsByClusterName(dstClusterName); List warmUpPartitions = new ArrayList<>(); for (Partition partition : partitions) { - warmUpTotalFileCache += partition.getDataSize(true); - warmUpPartitions.add(partition); - if (warmUpTotalFileCache > totalFileCache) { + Long partitionSize = partition.getDataSize(true); + if ((warmUpTotalFileCache + partitionSize) > totalFileCache) { break; } + warmUpTotalFileCache += partitionSize; + warmUpPartitions.add(partition); } List indexes = new ArrayList<>(); for (Partition partition : warmUpPartitions) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManagerUtils.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManagerUtils.java index 8e46547ae6c121..d73003e0daf9ea 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManagerUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/CacheHotspotManagerUtils.java @@ -231,7 +231,6 @@ public static AutoCloseConnectContext buildConnectContext() { sessionVariable.setEnableInsertStrict(true); sessionVariable.setInsertMaxFilterRatio(1); // sessionVariable.parallelExecInstanceNum = StatisticConstants.STATISTIC_PARALLEL_EXEC_INSTANCE_NUM; - sessionVariable.setEnableNereidsPlanner(false); sessionVariable.enableProfile = false; connectContext.setEnv(Env.getCurrentEnv()); connectContext.setDatabase(FeConstants.INTERNAL_DB_NAME); diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java index 567dc4b31242d4..e27339c2aacc14 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudClusterChecker.java @@ -52,6 +52,8 @@ public class CloudClusterChecker extends MasterDaemon { private CloudSystemInfoService cloudSystemInfoService; + boolean isUpdateCloudUniqueId = false; + public CloudClusterChecker(CloudSystemInfoService cloudSystemInfoService) { super("cloud cluster check", Config.cloud_cluster_check_interval_second * 1000L); this.cloudSystemInfoService = cloudSystemInfoService; @@ -219,7 +221,19 @@ private void checkDiffNode(Map remoteClusterIdToPB, if (LOG.isDebugEnabled()) { LOG.debug("current cluster status {} {}", currentClusterStatus, newClusterStatus); } - if 
(!currentClusterStatus.equals(newClusterStatus)) {
+            boolean needChange = false;
+            // ATTN: found bug, in the same cluster, the cluster status in the tags of BE nodes is inconsistent.
+            // Using a set to collect the cluster statuses from the BE nodes.
+            Set clusterStatusInMem = new HashSet<>();
+            for (Backend backend : currentBes) {
+                String beClusterStatus = backend.getTagMap().get(Tag.CLOUD_CLUSTER_STATUS);
+                clusterStatusInMem.add(beClusterStatus == null ? "NOT_SET" : beClusterStatus);
+            }
+            if (clusterStatusInMem.size() != 1) {
+                LOG.warn("cluster {}, multi be nodes cluster status inconsistent, fix it {}", cid, clusterStatusInMem);
+                needChange = true;
+            }
+            if (!currentClusterStatus.equals(newClusterStatus) || needChange) {
                // cluster's status changed
                LOG.info("cluster_status corresponding to cluster_id has been changed,"
                        + " cluster_id : {} , current_cluster_status : {}, new_cluster_status :{}",
@@ -382,6 +396,23 @@ private void checkCloudFes() {
        List toAdd = new ArrayList<>();
        List toDel = new ArrayList<>();
        List expectedFes = cpb.getNodesList();
+
+        if (!isUpdateCloudUniqueId) {
+            // Just run once and number of fes is small, so iterating is ok.
+            // newly added fe has cloudUniqueId.
+            for (Frontend fe : currentFes) {
+                for (Cloud.NodeInfoPB node : expectedFes) {
+                    if (fe.getHost().equals(Config.enable_fqdn_mode ? node.getHost() : node.getIp())
+                            && fe.getEditLogPort() == node.getEditLogPort()) {
+                        fe.setCloudUniqueId(node.getCloudUniqueId());
+                        LOG.info("update cloud unique id result {}", fe);
+                        break;
+                    }
+                }
+            }
+            isUpdateCloudUniqueId = true;
+        }
+
        diffNodes(toAdd, toDel, () -> {
            // memory
            Map currentMap = new HashMap<>();
@@ -395,6 +426,7 @@ private void checkCloudFes() {
                endpoint = endpoint + "_" + fe.getRole();
                currentMap.put(endpoint, fe);
            }
+            LOG.info("fes in memory {}", currentMap);
            return currentMap;
        }, () -> {
            // meta service
@@ -413,21 +445,24 @@ private void checkCloudFes() {
            Cloud.NodeInfoPB.NodeType type = node.getNodeType();
            // ATTN: just allow to add follower or observer
            if (Cloud.NodeInfoPB.NodeType.FE_MASTER.equals(type)) {
-                LOG.warn("impossible !!!, get fe node {} type equel master from ms", node);
+                LOG.warn("impossible !!!, get fe node {} type equal master from ms", node);
            }
-            FrontendNodeType role = type == Cloud.NodeInfoPB.NodeType.FE_FOLLOWER
-                    ? FrontendNodeType.FOLLOWER : FrontendNodeType.OBSERVER;
+            FrontendNodeType role = type == Cloud.NodeInfoPB.NodeType.FE_OBSERVER
+                    ? FrontendNodeType.OBSERVER : FrontendNodeType.FOLLOWER;
            Frontend fe = new Frontend(role,
                    CloudEnv.genFeNodeNameFromMeta(host, node.getEditLogPort(), node.getCtime() * 1000),
                    host, node.getEditLogPort());
+            fe.setCloudUniqueId(node.getCloudUniqueId());
            // add type to map key, for diff
            endpoint = endpoint + "_" + fe.getRole();
            nodeMap.put(endpoint, fe);
        }
+        LOG.info("fes in ms {}", nodeMap);
+
        return nodeMap;
    });
-        LOG.info("diffFrontends nodes: {}, current: {}, toAdd: {}, toDel: {}",
-                expectedFes, currentFes, toAdd, toDel);
+        LOG.info("diffFrontends nodes: {}, current: {}, toAdd: {}, toDel: {}, enable auto start: {}",
+                expectedFes, currentFes, toAdd, toDel, Config.enable_auto_start_for_cloud_cluster);
        if (toAdd.isEmpty() && toDel.isEmpty()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("runAfterCatalogReady getObserverFes nothing todo");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudEnv.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudEnv.java
index e212a7f948ecba..3138bad382a0fb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudEnv.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudEnv.java
@@ -38,9 +38,11 @@ import org.apache.doris.common.MetaNotFoundException;
 import org.apache.doris.common.UserException;
 import org.apache.doris.common.io.CountingDataOutputStream;
+import org.apache.doris.common.util.NetUtils;
 import org.apache.doris.ha.FrontendNodeType;
 import org.apache.doris.mysql.privilege.PrivPredicate;
 import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.system.Frontend;
 import org.apache.doris.system.SystemInfoService.HostInfo;
 import com.google.common.base.Preconditions;
@@ -50,7 +52,6 @@ import java.io.DataInputStream;
 import java.io.IOException;
-import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -71,6 +72,8 @@ public class CloudEnv extends Env {
    private CleanCopyJobScheduler cleanCopyJobScheduler;
+    private String cloudInstanceId;
+
    public CloudEnv(boolean isCheckpointCatalog) {
        super(isCheckpointCatalog);
        this.cleanCopyJobScheduler = new CleanCopyJobScheduler();
@@ -91,17 +94,32 @@ public CloudUpgradeMgr getCloudUpgradeMgr() {
        return this.upgradeMgr;
    }
+    public String getCloudInstanceId() {
+        return cloudInstanceId;
+    }
+
+    private void setCloudInstanceId(String cloudInstanceId) {
+        this.cloudInstanceId = cloudInstanceId;
+    }
+
    @Override
    public void initialize(String[] args) throws Exception {
-        if (Strings.isNullOrEmpty(Config.cloud_unique_id)) {
-            if (Strings.isNullOrEmpty(Config.cloud_instance_id)) {
-                throw new UserException("cloud_instance_id must be specified if deployed in dissaggregated");
-            }
-            LOG.info("cloud_unique_id is not set, setting it using instance_id");
-            Config.cloud_unique_id = "1:" + Config.cloud_instance_id + ":sql_server00";
+        if (Strings.isNullOrEmpty(Config.cloud_unique_id) && Config.cluster_id == -1) {
+            throw new UserException("cluster_id must be specified in fe.conf if deployed "
+                    + "in cloud mode, because FE should know to which cluster it belongs");
        }
-        LOG.info("Initializing CloudEnv with cloud_unique_id: {}", Config.cloud_unique_id);
+        if (Config.cluster_id != -1) {
+            setCloudInstanceId(String.valueOf(Config.cluster_id));
+        }
+
+        if (Strings.isNullOrEmpty(Config.cloud_unique_id) && !Strings.isNullOrEmpty(cloudInstanceId)) {
+            Config.cloud_unique_id = "1:" + cloudInstanceId + ":fe";
+            LOG.info("cloud_unique_id is empty, setting it to: {}", Config.cloud_unique_id);
+        }
+
+        LOG.info("Initializing 
CloudEnv with cloud_unique_id: {}, cluster_id: {}, cloudInstanceId: {}", + Config.cloud_unique_id, Config.cluster_id, cloudInstanceId); super.initialize(args); } @@ -162,23 +180,12 @@ private Cloud.NodeInfoPB getLocalTypeFromMetaService() { .stream().filter(NodeInfoPB::hasNodeType).collect(Collectors.toList()); helperNodes.clear(); - if (allNodes.stream().anyMatch(n -> n.getNodeType() == NodeInfoPB.NodeType.FE_FOLLOWER)) { - // multi followers mode, select first - Optional helperNode = allNodes.stream() - .filter(nodeInfoPB -> nodeInfoPB.getNodeType() == NodeInfoPB.NodeType.FE_FOLLOWER) - .map(nodeInfoPB -> new HostInfo( - Config.enable_fqdn_mode ? nodeInfoPB.getHost() : nodeInfoPB.getIp(), nodeInfoPB.getEditLogPort())) - .min(Comparator.comparing(HostInfo::getHost)); - helperNode.ifPresent(hostInfo -> helperNodes.add(hostInfo)); - } else { - // master observers mode - // helper node select follower's first, just one - helperNodes.addAll(allNodes.stream() - .filter(nodeInfoPB -> nodeInfoPB.getNodeType() == NodeInfoPB.NodeType.FE_MASTER) - .map(nodeInfoPB -> new HostInfo( - Config.enable_fqdn_mode ? nodeInfoPB.getHost() : nodeInfoPB.getIp(), nodeInfoPB.getEditLogPort())) - .collect(Collectors.toList())); - // check only have one master node. + Optional firstNonObserverNode = allNodes.stream().findFirst(); + if (firstNonObserverNode.isPresent()) { + helperNodes.add(new HostInfo( + Config.enable_fqdn_mode ? firstNonObserverNode.get().getHost() + : firstNonObserverNode.get().getIp(), + firstNonObserverNode.get().getEditLogPort())); } Preconditions.checkState(helperNodes.size() == 1); @@ -187,14 +194,11 @@ private Cloud.NodeInfoPB getLocalTypeFromMetaService() { return local.orElse(null); } - private void tryAddMyselToMS() { + private void tryAddMyselfToMS() { try { try { - if (Strings.isNullOrEmpty(Config.cloud_instance_id)) { - throw new DdlException("unable to create instance due to empty cloud_instance_id"); - } - getCloudSystemInfoService().tryCreateInstance(Config.cloud_instance_id, - Config.cloud_instance_id, false); + getCloudSystemInfoService().tryCreateInstance(getCloudInstanceId(), + getCloudInstanceId(), false); } catch (Exception e) { return; } @@ -219,7 +223,7 @@ protected void getClusterIdAndRole() throws IOException { LOG.warn("failed to get local fe's type, sleep {} s, try again.", Config.resource_not_ready_sleep_seconds); if (isStartFromEmpty()) { - tryAddMyselToMS(); + tryAddMyselfToMS(); } try { Thread.sleep(Config.resource_not_ready_sleep_seconds * 1000); @@ -228,10 +232,20 @@ protected void getClusterIdAndRole() throws IOException { } continue; } + type = nodeInfoPB.getNodeType(); break; } + try { + String instanceId; + instanceId = getCloudSystemInfoService().getInstanceId(Config.cloud_unique_id); + setCloudInstanceId(instanceId); + } catch (IOException e) { + LOG.error("Failed to get instance ID from cloud_unique_id: {}", Config.cloud_unique_id, e); + throw e; + } + LOG.info("current fe's role is {}", type == NodeInfoPB.NodeType.FE_MASTER ? "MASTER" : type == NodeInfoPB.NodeType.FE_FOLLOWER ? "FOLLOWER" : type == NodeInfoPB.NodeType.FE_OBSERVER ? 
"OBSERVER" : "UNKNOWN"); @@ -399,11 +413,25 @@ public void dropFrontend(FrontendNodeType role, String host, int port) throws Dd throw new DdlException("can not drop current master node."); } - getCloudSystemInfoService().dropFrontend(role, host, port); + Frontend frontend = checkFeExist(host, port); + if (frontend == null) { + throw new DdlException("Frontend does not exist."); + } + + if (frontend.getRole() != role) { + throw new DdlException(role.toString() + " does not exist[" + NetUtils + .getHostPortInAccessibleFormat(host, port) + "]"); + } + + if (Strings.isNullOrEmpty(frontend.getCloudUniqueId())) { + throw new DdlException("Frontend does not have a cloudUniqueId, wait for a minute."); + } + + getCloudSystemInfoService().dropFrontend(frontend); } @Override public void modifyFrontendHostName(String srcHost, int srcPort, String destHost) throws DdlException { - throw new DdlException("modify frontend host name is not supported in cloud mode"); + throw new DdlException("Modifying frontend hostname is not supported in cloud mode"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java index c1c58f7b898ca8..d4d57a6acd2da7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java @@ -72,6 +72,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import doris.segment_v2.SegmentV2; +import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -101,8 +102,7 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa String storagePolicy, IdGeneratorBuffer idGeneratorBuffer, BinlogConfig binlogConfig, - boolean isStorageMediumSpecified, - List clusterKeyIndexes) + boolean isStorageMediumSpecified) throws DdlException { // create base index first. 
Preconditions.checkArgument(tbl.getBaseIndexId() != -1); @@ -156,6 +156,11 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa } else { indexes = Lists.newArrayList(); } + List clusterKeyIndexes = null; + if (indexId == tbl.getBaseIndexId()) { + // only base and shadow index need cluster key indexes + clusterKeyIndexes = OlapTable.getClusterKeyIndexes(columns); + } Cloud.CreateTabletsRequest.Builder requestBuilder = Cloud.CreateTabletsRequest.newBuilder(); List rowStoreColumns = tbl.getTableProperty().getCopiedRowStoreColumns(); @@ -175,7 +180,7 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa tbl.getEnableMowLightDelete(), tbl.getInvertedIndexFileStorageFormat(), tbl.rowStorePageSize(), - tbl.variantEnableFlattenNested()); + tbl.variantEnableFlattenNested(), clusterKeyIndexes); requestBuilder.addTabletMetas(builder); } if (!storageVaultIdSet && ((CloudEnv) Env.getCurrentEnv()).getEnableStorageVault()) { @@ -184,7 +189,7 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa requestBuilder.setDbId(dbId); LOG.info("create tablets, dbId: {}, tableId: {}, tableName: {}, partitionId: {}, partitionName: {}, " - + "indexId: {}, vault name {}", + + "indexId: {}, vault name: {}", dbId, tbl.getId(), tbl.getName(), partitionId, partitionName, indexId, storageVaultName); Cloud.CreateTabletsResponse resp = sendCreateTabletsRpc(requestBuilder); // If the resp has no vault id set, it means the MS is running with enable_storage_vault false @@ -225,7 +230,7 @@ public OlapFile.TabletMetaCloudPB.Builder createTabletMetaBuilder(long tableId, Long timeSeriesCompactionLevelThreshold, boolean disableAutoCompaction, List rowStoreColumnUniqueIds, boolean enableMowLightDelete, TInvertedIndexFileStorageFormat invertedIndexFileStorageFormat, long pageSize, - boolean variantEnableFlattenNested) throws DdlException { + boolean variantEnableFlattenNested, List clusterKeyIdxes) throws DdlException { OlapFile.TabletMetaCloudPB.Builder builder = OlapFile.TabletMetaCloudPB.newBuilder(); builder.setTableId(tableId); builder.setIndexId(indexId); @@ -353,6 +358,9 @@ public OlapFile.TabletMetaCloudPB.Builder createTabletMetaBuilder(long tableId, } schemaBuilder.setRowStorePageSize(pageSize); schemaBuilder.setEnableVariantFlattenNested(variantEnableFlattenNested); + if (!CollectionUtils.isEmpty(clusterKeyIdxes)) { + schemaBuilder.addAllClusterKeyIdxes(clusterKeyIdxes); + } OlapFile.TabletSchemaCloudPB schema = schemaBuilder.build(); builder.setSchema(schema); diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java index 0c8e20827167d4..cca896614d53c6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/load/CloudBrokerLoadJob.java @@ -25,6 +25,7 @@ import org.apache.doris.catalog.Table; import org.apache.doris.cloud.system.CloudSystemInfoService; import org.apache.doris.common.MetaNotFoundException; +import org.apache.doris.common.Status; import org.apache.doris.common.UserException; import org.apache.doris.common.util.LogBuilder; import org.apache.doris.common.util.LogKey; @@ -45,6 +46,7 @@ import org.apache.doris.qe.QeProcessorImpl; import org.apache.doris.qe.StmtExecutor; import org.apache.doris.system.Backend; +import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TUniqueId; import 
com.google.common.base.Strings; @@ -316,7 +318,7 @@ protected void unprotectedExecuteRetry(FailMsg failMsg) { for (TUniqueId loadId : loadIds) { Coordinator coordinator = QeProcessorImpl.INSTANCE.getCoordinator(loadId); if (coordinator != null) { - coordinator.cancel(); + coordinator.cancel(new Status(TStatusCode.CANCELLED, "load job failed")); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java index 606f52369e5f7c..dbd582c21ea6fc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/system/CloudSystemInfoService.java @@ -17,6 +17,8 @@ package org.apache.doris.cloud.system; +import org.apache.doris.analysis.ModifyBackendClause; +import org.apache.doris.analysis.ModifyBackendHostNameClause; import org.apache.doris.catalog.Env; import org.apache.doris.catalog.ReplicaAllocation; import org.apache.doris.cloud.catalog.CloudEnv; @@ -50,11 +52,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; @@ -302,7 +306,7 @@ public synchronized void updateFrontends(List toAdd, List to } try { Env.getCurrentEnv().addFrontend(fe.getRole(), - fe.getHost(), fe.getEditLogPort(), fe.getNodeName()); + fe.getHost(), fe.getEditLogPort(), fe.getNodeName(), fe.getCloudUniqueId()); LOG.info("added cloud frontend={} ", fe); } catch (DdlException e) { LOG.warn("failed to add cloud frontend={} ", fe); @@ -310,19 +314,20 @@ public synchronized void updateFrontends(List toAdd, List to } } - private void alterBackendCluster(List hostInfos, String clusterId, + private void alterBackendCluster(List hostInfos, String computeGroupId, String cloudUniqueId, Cloud.AlterClusterRequest.Operation operation) throws DdlException { - if (Strings.isNullOrEmpty(Config.cloud_instance_id)) { + if (Strings.isNullOrEmpty(((CloudEnv) Env.getCurrentEnv()).getCloudInstanceId())) { throw new DdlException("unable to alter backends due to empty cloud_instance_id"); } // Issue rpc to meta to alter node, then fe master would add this node to its frontends Cloud.ClusterPB clusterPB = Cloud.ClusterPB.newBuilder() - .setClusterId(clusterId) + .setClusterId(computeGroupId) .setType(Cloud.ClusterPB.Type.COMPUTE) .build(); for (HostInfo hostInfo : hostInfos) { Cloud.NodeInfoPB nodeInfoPB = Cloud.NodeInfoPB.newBuilder() + .setCloudUniqueId(cloudUniqueId) .setIp(hostInfo.getHost()) .setHost(hostInfo.getHost()) .setHeartbeatPort(hostInfo.getPort()) @@ -332,7 +337,7 @@ private void alterBackendCluster(List hostInfos, String clusterId, } Cloud.AlterClusterRequest request = Cloud.AlterClusterRequest.newBuilder() - .setInstanceId(Config.cloud_instance_id) + .setInstanceId(((CloudEnv) Env.getCurrentEnv()).getCloudInstanceId()) .setOp(operation) .setCluster(clusterPB) .build(); @@ -359,13 +364,18 @@ private void alterBackendCluster(List hostInfos, String clusterId, public void addBackends(List hostInfos, Map tagMap) throws UserException { // issue rpc to meta to add this node, then fe master would add this node to its backends - String clusterName = tagMap.getOrDefault(Tag.CLOUD_CLUSTER_NAME, 
Tag.VALUE_DEFAULT_CLOUD_CLUSTER_NAME);
+        String clusterName = tagMap.getOrDefault(Tag.COMPUTE_GROUP_NAME, Tag.VALUE_DEFAULT_COMPUTE_GROUP_NAME);
         if (clusterName.isEmpty()) {
-            throw new UserException("clusterName empty");
+            throw new UserException("Compute group name cannot be empty");
         }
-        String clusterId = tryCreateCluster(clusterName, RandomIdentifierGenerator.generateRandomIdentifier(8));
-        alterBackendCluster(hostInfos, clusterId, Cloud.AlterClusterRequest.Operation.ADD_NODE);
+        String computeGroupId = tryCreateComputeGroup(clusterName,
+                RandomIdentifierGenerator.generateRandomIdentifier(8));
+        String instanceId = Config.cluster_id == -1 ? ((CloudEnv) Env.getCurrentEnv()).getCloudInstanceId()
+                : String.valueOf(Config.cluster_id);
+
+        String cloudUniqueId = "1:" + instanceId + ":" + RandomIdentifierGenerator.generateRandomIdentifier(8);
+        alterBackendCluster(hostInfos, computeGroupId, cloudUniqueId, Cloud.AlterClusterRequest.Operation.ADD_NODE);
     }
 
     // final entry of dropping backend
@@ -378,28 +388,32 @@ public void dropBackend(String host, int heartbeatPort) throws DdlException {
             throw new DdlException("backend does not exists[" + NetUtils
                     .getHostPortInAccessibleFormat(host, heartbeatPort) + "]");
         }
-        String clusterId = droppedBackend.getTagMap().get(Tag.CLOUD_CLUSTER_ID);
-        if (clusterId == null || clusterId.isEmpty()) {
+        String computeGroupId = droppedBackend.getTagMap().get(Tag.CLOUD_CLUSTER_ID);
+        if (computeGroupId == null || computeGroupId.isEmpty()) {
             throw new DdlException("Failed to get cluster ID for backend: " + droppedBackend.getId());
         }
         List hostInfos = new ArrayList<>();
         hostInfos.add(new HostInfo(host, heartbeatPort));
-        alterBackendCluster(hostInfos, clusterId, Cloud.AlterClusterRequest.Operation.DROP_NODE);
+        String cloudUniqueId = droppedBackend.getCloudUniqueId();
+        alterBackendCluster(hostInfos, computeGroupId, cloudUniqueId,
+                Cloud.AlterClusterRequest.Operation.DROP_NODE);
     }
 
     @Override
     public void decommissionBackend(Backend backend) throws UserException {
-        String clusterId = backend.getTagMap().get(Tag.CLOUD_CLUSTER_ID);
-        if (clusterId == null || clusterId.isEmpty()) {
+        String computeGroupId = backend.getTagMap().get(Tag.CLOUD_CLUSTER_ID);
+        if (computeGroupId == null || computeGroupId.isEmpty()) {
             throw new UserException("Failed to get cluster ID for backend: " + backend.getId());
         }
         List hostInfos = new ArrayList<>();
         hostInfos.add(new HostInfo(backend.getHost(), backend.getHeartbeatPort()));
         try {
-            alterBackendCluster(hostInfos, clusterId, Cloud.AlterClusterRequest.Operation.DECOMMISSION_NODE);
+            String cloudUniqueId = backend.getCloudUniqueId();
+            alterBackendCluster(hostInfos, computeGroupId, cloudUniqueId,
+                    Cloud.AlterClusterRequest.Operation.DECOMMISSION_NODE);
         } catch (DdlException e) {
             String errorMessage = e.getMessage();
             LOG.warn("Failed to decommission backend: {}", errorMessage);
@@ -407,6 +421,16 @@ public void decommissionBackend(Backend backend) throws UserException {
         }
     }
 
+    @Override
+    public void modifyBackends(ModifyBackendClause alterClause) throws UserException {
+        throw new UserException("Modifying backends is not supported in cloud mode");
+    }
+
+    @Override
+    public void modifyBackendHost(ModifyBackendHostNameClause clause) throws UserException {
+        throw new UserException("Modifying backend hostname is not supported in cloud mode");
+    }
+
     @Override
     public void replayAddBackend(Backend newBackend) {
         super.replayAddBackend(newBackend);
@@ -567,10 +591,18 @@ public String getCloudStatusById(final String clusterId) {
         }
     }
 
+    public Set getClusterStatus(List backends) {
+        // ATTN: a bug was found where BE nodes within the same cluster carry inconsistent cluster status tags.
+        // Use a set to collect the cluster statuses from the BE nodes.
+        return backends.stream().map(Backend::getCloudClusterStatus).collect(Collectors.toSet());
+    }
+
     public String getCloudStatusByIdNoLock(final String clusterId) {
-        return clusterIdToBackend.getOrDefault(clusterId, new ArrayList<>())
-                .stream().map(Backend::getCloudClusterStatus).findFirst()
-                .orElse(String.valueOf(Cloud.ClusterStatus.UNKNOWN));
+        List bes = clusterIdToBackend.getOrDefault(clusterId, new ArrayList<>());
+        Optional hasNormal = bes.stream().map(Backend::getCloudClusterStatus)
+                .filter(status -> status.equals(String.valueOf(Cloud.ClusterStatus.NORMAL))).findAny();
+        return hasNormal.orElseGet(() -> bes.stream().map(Backend::getCloudClusterStatus).findFirst()
+                .orElse(String.valueOf(Cloud.ClusterStatus.NORMAL)));
     }
 
     public void updateClusterNameToId(final String newName,
@@ -693,6 +725,7 @@ public String addCloudCluster(final String clusterName, final String userName) t
         List backends = new ArrayList<>();
         for (Cloud.NodeInfoPB node : cpb.getNodesList()) {
             Map newTagMap = Tag.DEFAULT_BACKEND_TAG.toMap();
+            newTagMap.put(Tag.CLOUD_CLUSTER_NAME, clusterNameMeta);
             newTagMap.put(Tag.CLOUD_CLUSTER_ID, clusterId);
             newTagMap.put(Tag.CLOUD_CLUSTER_STATUS, String.valueOf(clusterStatus));
@@ -764,18 +797,29 @@ public Map getCloudClusterNameToId() {
     // FrontendCluster = SqlServerCluster
     private void alterFrontendCluster(FrontendNodeType role, String host, int editLogPort,
-            Cloud.AlterClusterRequest.Operation op) throws DdlException {
-        if (Strings.isNullOrEmpty(Config.cloud_instance_id)) {
+            String cloudUniqueId, Cloud.AlterClusterRequest.Operation op) throws DdlException {
+        if (Strings.isNullOrEmpty(((CloudEnv) Env.getCurrentEnv()).getCloudInstanceId())) {
             throw new DdlException("unable to alter frontend due to empty cloud_instance_id");
         }
+        Cloud.NodeInfoPB.NodeType nodeType;
+        if (role == FrontendNodeType.MASTER) {
+            nodeType = Cloud.NodeInfoPB.NodeType.FE_MASTER;
+        } else if (role == FrontendNodeType.FOLLOWER) {
+            nodeType = Cloud.NodeInfoPB.NodeType.FE_FOLLOWER;
+        } else if (role == FrontendNodeType.OBSERVER) {
+            nodeType = Cloud.NodeInfoPB.NodeType.FE_OBSERVER;
+        } else {
+            throw new DdlException("unable to alter frontend due to invalid role");
+        }
+
         // Issue rpc to meta to add this node, then fe master would add this node to its frontends
         Cloud.NodeInfoPB nodeInfoPB = Cloud.NodeInfoPB.newBuilder()
+                .setCloudUniqueId(cloudUniqueId)
                 .setIp(host)
                 .setHost(host)
                 .setEditLogPort(editLogPort)
-                .setNodeType(role == FrontendNodeType.MASTER ? Cloud.NodeInfoPB.NodeType.FE_MASTER
-                        : Cloud.NodeInfoPB.NodeType.FE_OBSERVER)
+                .setNodeType(nodeType)
                 .setCtime(System.currentTimeMillis() / 1000)
                 .build();
@@ -787,7 +831,7 @@ private void alterFrontendCluster(FrontendNodeType role, String host, int editLo
                 .build();
 
         Cloud.AlterClusterRequest request = Cloud.AlterClusterRequest.newBuilder()
-                .setInstanceId(Config.cloud_instance_id)
+                .setInstanceId(((CloudEnv) Env.getCurrentEnv()).getCloudInstanceId())
                 .setOp(op)
                 .setCluster(clusterPB)
                 .build();
@@ -810,20 +854,21 @@ public void addFrontend(FrontendNodeType role, String host, int editLogPort) thr
         Cloud.AlterClusterRequest.Operation op;
         op = role == FrontendNodeType.MASTER ?
Cloud.AlterClusterRequest.Operation.ADD_CLUSTER : Cloud.AlterClusterRequest.Operation.ADD_NODE; - alterFrontendCluster(role, host, editLogPort, op); + alterFrontendCluster(role, host, editLogPort, Config.cloud_unique_id, op); } - public void dropFrontend(FrontendNodeType role, String host, int editLogPort) throws DdlException { - alterFrontendCluster(role, host, editLogPort, Cloud.AlterClusterRequest.Operation.DROP_NODE); + public void dropFrontend(Frontend frontend) throws DdlException { + alterFrontendCluster(frontend.getRole(), frontend.getHost(), frontend.getEditLogPort(), + frontend.getCloudUniqueId(), Cloud.AlterClusterRequest.Operation.DROP_NODE); } - private String tryCreateCluster(String clusterName, String clusterId) throws UserException { - if (Strings.isNullOrEmpty(Config.cloud_instance_id)) { - throw new DdlException("unable to create cluster due to empty cloud_instance_id"); + private String tryCreateComputeGroup(String clusterName, String computeGroupId) throws UserException { + if (Strings.isNullOrEmpty(((CloudEnv) Env.getCurrentEnv()).getCloudInstanceId())) { + throw new DdlException("unable to create compute group due to empty cluster_id"); } Cloud.ClusterPB clusterPB = Cloud.ClusterPB.newBuilder() - .setClusterId(clusterId) + .setClusterId(computeGroupId) .setClusterName(clusterName) .setType(Cloud.ClusterPB.Type.COMPUTE) .build(); @@ -846,7 +891,7 @@ private String tryCreateCluster(String clusterName, String clusterId) throws Use } if (response.getStatus().getCode() == Cloud.MetaServiceCode.OK) { - return clusterId; + return computeGroupId; } else if (response.getStatus().getCode() == Cloud.MetaServiceCode.ALREADY_EXISTED) { Cloud.GetClusterResponse clusterResponse = getCloudCluster(clusterName, "", ""); if (clusterResponse.getStatus().getCode() == Cloud.MetaServiceCode.OK) { @@ -949,6 +994,9 @@ public String waitForAutoStart(String clusterName) throws DdlException { if (Config.isNotCloudMode()) { return null; } + if (!Config.enable_auto_start_for_cloud_cluster) { + return null; + } clusterName = getClusterNameAutoStart(clusterName); if (Strings.isNullOrEmpty(clusterName)) { LOG.warn("auto start in cloud mode, but clusterName empty {}", clusterName); @@ -999,7 +1047,7 @@ public String waitForAutoStart(String clusterName) throws DdlException { } } // wait 5 mins - int retryTimes = 5 * 60; + int retryTimes = Config.auto_start_wait_to_resume_times < 0 ? 
300 : Config.auto_start_wait_to_resume_times; int retryTime = 0; StopWatch stopWatch = new StopWatch(); stopWatch.start(); @@ -1063,4 +1111,24 @@ public void tryCreateInstance(String instanceId, String name, boolean sseEnabled throw new DdlException("Failed to create instance"); } } + + public String getInstanceId(String cloudUniqueId) throws IOException { + Cloud.GetInstanceRequest.Builder builder = Cloud.GetInstanceRequest.newBuilder(); + builder.setCloudUniqueId(cloudUniqueId); + + Cloud.GetInstanceResponse response; + try { + Cloud.GetInstanceRequest request = builder.build(); + response = MetaServiceProxy.getInstance().getInstance(request); + LOG.info("get instance info, request: {}, response: {}", request, response); + if (response.getStatus().getCode() != Cloud.MetaServiceCode.OK) { + LOG.warn("Failed to get instance info, response: {}", response); + throw new IOException("Failed to get instance info"); + } + return response.getInstance().getInstanceId(); + } catch (RpcException e) { + LOG.warn("Failed to get instance info {}", cloudUniqueId, e); + throw new IOException("Failed to get instance info"); + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java index 7349efb3fca56d..a5e0b70bd64aad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java @@ -142,6 +142,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; public class CloudGlobalTransactionMgr implements GlobalTransactionMgrIface { @@ -150,6 +151,7 @@ public class CloudGlobalTransactionMgr implements GlobalTransactionMgrIface { private TxnStateCallbackFactory callbackFactory; private final Map subTxnIdToTxnId = new ConcurrentHashMap<>(); + private Map waitToCommitTxnCountMap = new ConcurrentHashMap<>(); public CloudGlobalTransactionMgr() { this.callbackFactory = new TxnStateCallbackFactory(); @@ -327,6 +329,7 @@ public void preCommitTransaction2PC(Database db, List tableList, long tra } } + @Deprecated @Override public void commitTransaction(long dbId, List
tableList, long transactionId, List tabletCommitInfos) @@ -794,8 +797,8 @@ private void getDeleteBitmapUpdateLock(Map> tableToParttions, lo LOG.warn("ignore get delete bitmap lock exception, transactionId={}, retryTime={}", transactionId, retryTime, e); } - // sleep random millis [20, 200] ms, avoid txn conflict - int randomMillis = 20 + (int) (Math.random() * (200 - 20)); + // sleep random millis [20, 300] ms, avoid txn conflict + int randomMillis = 20 + (int) (Math.random() * (300 - 20)); if (LOG.isDebugEnabled()) { LOG.debug("randomMillis:{}", randomMillis); } @@ -867,6 +870,7 @@ private void sendCalcDeleteBitmaptask(long dbId, long transactionId, // not check return value, because the add will success AgentTaskQueue.addTask(task); batchTask.addTask(task); + LOG.info("send calculate delete bitmap task to be {}, txn_id {}", entry.getKey(), transactionId); } AgentTaskExecutor.submit(batchTask); @@ -929,7 +933,28 @@ private void debugCalcDeleteBitmapRandomTimeout() throws UserException { public boolean commitAndPublishTransaction(DatabaseIf db, List
tableList, long transactionId, List tabletCommitInfos, long timeoutMillis) throws UserException { - return commitAndPublishTransaction(db, tableList, transactionId, tabletCommitInfos, timeoutMillis, null); + int retryTimes = 0; + boolean res = false; + while (true) { + try { + res = commitAndPublishTransaction(db, tableList, transactionId, tabletCommitInfos, timeoutMillis, null); + break; + } catch (UserException e) { + LOG.warn("failed to commit txn, txnId={},retryTimes={},exception={}", + transactionId, retryTimes, e); + // only mow table will catch DELETE_BITMAP_LOCK_ERR and need to retry + if (e.getErrorCode() == InternalErrorCode.DELETE_BITMAP_LOCK_ERR) { + retryTimes++; + if (retryTimes >= Config.mow_calculate_delete_bitmap_retry_times) { + // should throw exception after running out of retry times + throw e; + } + } else { + throw e; + } + } + } + return res; } @Override @@ -971,7 +996,19 @@ public boolean commitAndPublishTransaction(DatabaseIf db, long transactionId, public boolean commitAndPublishTransaction(DatabaseIf db, List
tableList, long transactionId, List tabletCommitInfos, long timeoutMillis, TxnCommitAttachment txnCommitAttachment) throws UserException { + for (int i = 0; i < tableList.size(); i++) { + long tableId = tableList.get(i).getId(); + LOG.info("start commit txn=" + transactionId + ",table=" + tableId); + } + for (Map.Entry entry : waitToCommitTxnCountMap.entrySet()) { + if (entry.getValue().get() > 5) { + LOG.info("now table {} commitAndPublishTransaction queue is {}", entry.getKey(), + entry.getValue().get()); + } + } + increaseWaitingLockCount(tableList); if (!MetaLockUtils.tryCommitLockTables(tableList, timeoutMillis, TimeUnit.MILLISECONDS)) { + decreaseWaitingLockCount(tableList); // DELETE_BITMAP_LOCK_ERR will be retried on be throw new UserException(InternalErrorCode.DELETE_BITMAP_LOCK_ERR, "get table cloud commit lock timeout, tableList=(" @@ -980,6 +1017,7 @@ public boolean commitAndPublishTransaction(DatabaseIf db, List
tableList, try { commitTransaction(db.getId(), tableList, transactionId, tabletCommitInfos, txnCommitAttachment); } finally { + decreaseWaitingLockCount(tableList); MetaLockUtils.commitUnlockTables(tableList); } return true; @@ -1739,4 +1777,23 @@ public TransactionState abortSubTxn(long txnId, long subTxnId, long dbId, Set tableList) { + for (int i = 0; i < tableList.size(); i++) { + long tableId = tableList.get(i).getId(); + if (waitToCommitTxnCountMap.containsKey(tableId)) { + waitToCommitTxnCountMap.get(tableId).addAndGet(1); + } else { + waitToCommitTxnCountMap.put(tableId, new AtomicInteger()); + waitToCommitTxnCountMap.get(tableId).addAndGet(1); + } + } + } + + private void decreaseWaitingLockCount(List
tableList) { + for (int i = 0; i < tableList.size(); i++) { + long tableId = tableList.get(i).getId(); + waitToCommitTxnCountMap.get(tableId).decrementAndGet(); + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java index c65116dcc8310b..68e15e7ee46e81 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorCode.java @@ -1228,9 +1228,9 @@ public enum ErrorCode { "There can only be one stmt that returns the result and it is at the end."), ERR_CLOUD_CLUSTER_ERROR(5098, new byte[]{'4', '2', '0', '0', '0'}, - "Cluster %s not exist, use SQL 'SHOW CLUSTERS' to get a valid cluster"), + "Compute group (aka. Cloud cluster) %s not exist, use SQL 'SHOW COMPUTE GROUPS' to get a valid compute group"), - ERR_NO_CLUSTER_ERROR(5099, new byte[]{'4', '2', '0', '0', '0'}, "No cluster selected"), + ERR_NO_CLUSTER_ERROR(5099, new byte[]{'4', '2', '0', '0', '0'}, "No compute group (cloud cluster) selected"), ERR_NOT_CLOUD_MODE(6000, new byte[]{'4', '2', '0', '0', '0'}, "Command only support in cloud mode."); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/InternalErrorCode.java b/fe/fe-core/src/main/java/org/apache/doris/common/InternalErrorCode.java index 4382a7ea06ede5..73808e2354e805 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/InternalErrorCode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/InternalErrorCode.java @@ -36,7 +36,6 @@ public enum InternalErrorCode { CREATE_TASKS_ERR(103), TASKS_ABORT_ERR(104), CANNOT_RESUME_ERR(105), - TIMEOUT_TOO_MUCH(106), // for external catalog GET_REMOTE_DATA_ERROR(202), diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/AuthProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/AuthProcDir.java index b05284d59a0538..0fa5b5d5b4179b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/AuthProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/AuthProcDir.java @@ -33,7 +33,7 @@ public class AuthProcDir implements ProcDirInterface { public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() .add("UserIdentity").add("Comment").add("Password").add("Roles").add("GlobalPrivs").add("CatalogPrivs") .add("DatabasePrivs").add("TablePrivs").add("ColPrivs").add("ResourcePrivs").add("CloudClusterPrivs") - .add("CloudStagePrivs").add("StorageVaultPrivs").add("WorkloadGroupPrivs") + .add("CloudStagePrivs").add("StorageVaultPrivs").add("WorkloadGroupPrivs").add("ComputeGroupPrivs") .build(); private Auth auth; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java index 7828a38e6eb242..d2300cd667d77c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ExecutionProfile.java @@ -17,17 +17,14 @@ package org.apache.doris.common.profile; -import org.apache.doris.catalog.Env; import org.apache.doris.common.Pair; import org.apache.doris.common.Status; import org.apache.doris.common.util.DebugUtil; import org.apache.doris.common.util.RuntimeProfile; import org.apache.doris.planner.PlanFragmentId; -import org.apache.doris.system.Backend; import org.apache.doris.thrift.TDetailedReportParams; import org.apache.doris.thrift.TNetworkAddress; import org.apache.doris.thrift.TQueryProfile; 
-import org.apache.doris.thrift.TReportExecStatusParams; import org.apache.doris.thrift.TRuntimeProfileTree; import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TUniqueId; @@ -235,8 +232,6 @@ public Status updateProfile(TQueryProfile profile, TNetworkAddress backendHBAddr List fragmentProfile = entry.getValue(); int pipelineIdx = 0; List taskProfile = Lists.newArrayList(); - // The naming rule must be same with the one in updateProfile(TReportExecStatusParams params) - // Because we relay on the name of RuntimeProfile to eliminate the duplicate profile String suffix = " (host=" + backendHBAddress + ")"; for (TDetailedReportParams pipelineProfile : fragmentProfile) { String name = ""; @@ -246,6 +241,7 @@ public Status updateProfile(TQueryProfile profile, TNetworkAddress backendHBAddr name = "Pipeline :" + pipelineIdx + " " + suffix; pipelineIdx++; } + RuntimeProfile profileNode = new RuntimeProfile(name); // The taskprofile is used to save the profile of the pipeline, without // considering the FragmentLevel. @@ -273,54 +269,6 @@ public Status updateProfile(TQueryProfile profile, TNetworkAddress backendHBAddr return new Status(TStatusCode.OK, "Success"); } - public void updateProfile(TReportExecStatusParams params) { - Backend backend = null; - if (params.isSetBackendId()) { - backend = Env.getCurrentSystemInfo().getBackend(params.getBackendId()); - if (backend == null) { - LOG.warn("could not find backend with id {}", params.getBackendId()); - return; - } - } else { - LOG.warn("backend id is not set in report profile request, bad message"); - return; - } - - int pipelineIdx = 0; - List taskProfile = Lists.newArrayList(); - String suffix = " (host=" + backend.getHeartbeatAddress() + ")"; - // Each datailed report params is a fragment level profile or a pipeline profile - for (TDetailedReportParams param : params.detailed_report) { - String name = ""; - if (param.isSetIsFragmentLevel() && param.is_fragment_level) { - name = "Fragment Level Profile: " + suffix; - } else { - name = "Pipeline :" + pipelineIdx + " " + suffix; - pipelineIdx++; - } - RuntimeProfile profile = new RuntimeProfile(name); - // The taskprofile is used to save the profile of the pipeline, without - // considering the FragmentLevel. - if (!(param.isSetIsFragmentLevel() && param.is_fragment_level)) { - taskProfile.add(profile); - } - if (param.isSetProfile()) { - profile.update(param.profile); - } - if (params.done) { - profile.setIsDone(true); - } - profile.sortChildren(); - fragmentProfiles.get(params.fragment_id).addChild(profile); - } - // TODO ygl: is this right? there maybe multi Backends, what does - // update load profile do??? 
- if (params.isSetLoadChannelProfile()) { - loadChannelProfile.update(params.loadChannelProfile); - } - setMultiBeProfile(params.fragment_id, backend.getHeartbeatAddress(), taskProfile); - } - public synchronized void addFragmentBackend(PlanFragmentId fragmentId, Long backendId) { fragmentIdBeNum.put(fragmentId.asInt(), fragmentIdBeNum.get(fragmentId.asInt()) + 1); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/Profile.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/Profile.java index 88fd317879451e..5cd489d11c0f74 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/Profile.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/Profile.java @@ -116,6 +116,8 @@ public class Profile { public Map rowsProducedMap = new HashMap<>(); private List physicalRelations = new ArrayList<>(); + private String changedSessionVarCache = ""; + // Need default constructor for read from storage public Profile() {} @@ -318,8 +320,9 @@ public String getProfileByLevel() { StringBuilder builder = new StringBuilder(); // add summary to builder summaryProfile.prettyPrint(builder); - // read execution profile from storage or generate it from memory (during query execution) + getChangedSessionVars(builder); getExecutionProfileContent(builder); + getOnStorageProfile(builder); return builder.toString(); } @@ -363,49 +366,13 @@ public String getProfileBrief() { return gson.toJson(rootProfile.toBrief()); } - // Read file if profile has been stored to storage. + // Return if profile has been stored to storage public void getExecutionProfileContent(StringBuilder builder) { if (builder == null) { builder = new StringBuilder(); } if (profileHasBeenStored()) { - LOG.info("Profile {} has been stored to storage, reading it from storage", id); - - FileInputStream fileInputStream = null; - - try { - fileInputStream = createPorfileFileInputStream(profileStoragePath); - if (fileInputStream == null) { - builder.append("Failed to read execution profile from " + profileStoragePath); - return; - } - - DataInputStream dataInput = new DataInputStream(fileInputStream); - // skip summary profile - Text.readString(dataInput); - // read compressed execution profile - int binarySize = dataInput.readInt(); - byte[] binaryExecutionProfile = new byte[binarySize]; - dataInput.readFully(binaryExecutionProfile, 0, binarySize); - // decompress binary execution profile - String textExecutionProfile = decompressExecutionProfile(binaryExecutionProfile); - builder.append(textExecutionProfile); - return; - } catch (Exception e) { - LOG.error("An error occurred while reading execution profile from storage, profile storage path: {}", - profileStoragePath, e); - builder.append("Failed to read execution profile from " + profileStoragePath); - } finally { - if (fileInputStream != null) { - try { - fileInputStream.close(); - } catch (Exception e) { - LOG.warn("Close profile {} failed", profileStoragePath, e); - } - } - } - return; } @@ -473,8 +440,9 @@ public void setSummaryProfile(SummaryProfile summaryProfile) { this.summaryProfile = summaryProfile; } - public void releaseExecutionProfile() { + public void releaseMemory() { this.executionProfiles.clear(); + this.changedSessionVarCache = ""; } public boolean shouldStoreToStorage() { @@ -490,7 +458,9 @@ public boolean shouldStoreToStorage() { boolean hasReportingProfile = false; if (this.executionProfiles.isEmpty()) { - LOG.warn("Profile {} has no execution profile, it is abnormal", id); + // Query finished, but no execution profile. 
+ // 1. Query is executed on FE. + // 2. Not a SELECT query, just a DDL. return false; } @@ -603,6 +573,7 @@ public void writeToStorage(String systemProfileStorageDir) { // store execution profiles as string StringBuilder build = new StringBuilder(); + getChangedSessionVars(build); getExecutionProfileContent(build); byte[] buf = compressExecutionProfile(build.toString()); dataOutputStream.writeInt(buf.length); @@ -692,4 +663,65 @@ private void updateActualRowCountOnPhysicalPlan(Plan plan) { updateActualRowCountOnPhysicalPlan(child); } } + + public void setChangedSessionVar(String changedSessionVar) { + this.changedSessionVarCache = changedSessionVar; + } + + private void getChangedSessionVars(StringBuilder builder) { + if (builder == null) { + builder = new StringBuilder(); + } + if (profileHasBeenStored()) { + return; + } + + builder.append("\nChanged Session Variables:\n"); + builder.append(changedSessionVarCache); + builder.append("\n"); + } + + private void getOnStorageProfile(StringBuilder builder) { + if (!profileHasBeenStored()) { + return; + } + + LOG.info("Profile {} has been stored to storage, reading it from storage", id); + + FileInputStream fileInputStream = null; + + try { + fileInputStream = createPorfileFileInputStream(profileStoragePath); + if (fileInputStream == null) { + builder.append("Failed to read execution profile from " + profileStoragePath); + return; + } + + DataInputStream dataInput = new DataInputStream(fileInputStream); + // skip summary profile + Text.readString(dataInput); + // read compressed execution profile + int binarySize = dataInput.readInt(); + byte[] binaryExecutionProfile = new byte[binarySize]; + dataInput.readFully(binaryExecutionProfile, 0, binarySize); + // decompress binary execution profile + String textExecutionProfile = decompressExecutionProfile(binaryExecutionProfile); + builder.append(textExecutionProfile); + return; + } catch (Exception e) { + LOG.error("An error occurred while reading execution profile from storage, profile storage path: {}", + profileStoragePath, e); + builder.append("Failed to read execution profile from " + profileStoragePath); + } finally { + if (fileInputStream != null) { + try { + fileInputStream.close(); + } catch (Exception e) { + LOG.warn("Close profile {} failed", profileStoragePath, e); + } + } + } + + return; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/publish/TopicPublisherThread.java b/fe/fe-core/src/main/java/org/apache/doris/common/publish/TopicPublisherThread.java index 797b0893936513..df3b06e8271602 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/publish/TopicPublisherThread.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/publish/TopicPublisherThread.java @@ -35,7 +35,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -76,18 +75,24 @@ protected void runAfterCatalogReady() { // because it may means workload group/policy is dropped // step 2: publish topic info to all be - Collection nodesToPublish; + List nodesToPublish = new ArrayList<>(); try { - nodesToPublish = clusterInfoService.getAllBackendsByAllCluster().values(); + for (Backend be : clusterInfoService.getAllBackendsByAllCluster().values()) { + if (be.isAlive()) { + nodesToPublish.add(be); + } + } } catch (Exception e) { LOG.warn("get backends failed", e); return; } + if (nodesToPublish.isEmpty()) { + LOG.info("no alive backend, skip publish topic"); + return; + } AckResponseHandler 
handler = new AckResponseHandler(nodesToPublish); for (Backend be : nodesToPublish) { - if (be.isAlive()) { - executor.submit(new TopicPublishWorker(request, be, handler)); - } + executor.submit(new TopicPublishWorker(request, be, handler)); } try { int timeoutMs = Config.publish_topic_info_interval_ms / 3 * 2; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/DebugUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/DebugUtil.java index 937c74cac66bb5..39c4cd91904775 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/DebugUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/DebugUtil.java @@ -26,6 +26,7 @@ import java.io.PrintWriter; import java.io.StringWriter; import java.text.DecimalFormat; +import java.util.List; import java.util.UUID; public class DebugUtil { @@ -177,4 +178,62 @@ public static String getStackTrace(Exception e) { e.printStackTrace(new PrintWriter(sw)); return sw.toString(); } + + public static String prettyPrintChangedSessionVar(List> nestedList) { + if (nestedList == null || nestedList.isEmpty()) { + return ""; + } + + StringBuilder output = new StringBuilder(); + + // Assuming each inner list has exactly 3 columns + int[] columnWidths = new int[3]; + + // Calculate the maximum width of each column + // First consider the header widths: "VarName", "CurrentValue", "DefaultValue" + String[] headers = {"VarName", "CurrentValue", "DefaultValue"}; + for (int i = 0; i < headers.length; i++) { + columnWidths[i] = headers[i].length(); // Initialize with header length + } + + // Update column widths based on data + for (List row : nestedList) { + for (int i = 0; i < row.size(); i++) { + columnWidths[i] = Math.max(columnWidths[i], row.get(i).length()); + } + } + + // Build the table header + for (int i = 0; i < headers.length; i++) { + output.append(String.format("%-" + columnWidths[i] + "s", headers[i])); + if (i < headers.length - 1) { + output.append(" | "); // Separator between columns + } + } + output.append("\n"); // Newline after the header + + // Add a separator line for better readability (optional) + for (int i = 0; i < headers.length; i++) { + output.append(String.format("%-" + columnWidths[i] + "s", Strings.repeat("-", columnWidths[i]))); + if (i < headers.length - 1) { + output.append("-|-"); // Separator between columns + } + } + output.append("\n"); // Newline after the separator + + // Build the table body with proper alignment based on column widths + for (List row : nestedList) { + for (int i = 0; i < row.size(); i++) { + String element = row.get(i); + // Pad with spaces if the element is shorter than the column width + output.append(String.format("%-" + columnWidths[i] + "s", element)); + if (i < row.size() - 1) { + output.append(" | "); // Separator between columns + } + } + output.append("\n"); // Newline after each row + } + + return output.toString(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/FileFormatUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/FileFormatUtils.java index 0b646a00b164d1..15240f103b0e51 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/FileFormatUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/FileFormatUtils.java @@ -26,6 +26,7 @@ import com.google.common.base.Strings; import java.util.List; +import java.util.Optional; import java.util.regex.Matcher; public class FileFormatUtils { @@ -105,4 +106,18 @@ public static void parseCsvSchema(List csvSchema, String csvSchemaStr) throw new 
AnalysisException("invalid csv schema: " + e.getMessage()); } } + + public static Optional getFileFormatBySuffix(String filename) { + String fileString = filename.toLowerCase(); + if (fileString.endsWith(".avro")) { + return Optional.of("avro"); + } else if (fileString.endsWith(".orc")) { + return Optional.of("orc"); + } else if (fileString.endsWith(".parquet")) { + return Optional.of("parquet"); + } else { + // Unable to get file format from file path + return Optional.empty(); + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/InternalDatabaseUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/InternalDatabaseUtil.java index f255b794cbd4ad..4b915a9171ff36 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/InternalDatabaseUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/InternalDatabaseUtil.java @@ -30,7 +30,8 @@ public static void checkDatabase(String dbName, ConnectContext ctx) throws Analy if (!FeConstants.INTERNAL_DB_NAME.equals(dbName)) { return; } - if (ctx == null || ctx.getCurrentUserIdentity() == null || !ctx.getCurrentUserIdentity().isRootUser()) { + if (ctx == null || ctx.getCurrentUserIdentity() == null + || !ctx.getCurrentUserIdentity().isRootUser() && !ctx.getCurrentUserIdentity().isAdminUser()) { throw new AnalysisException("Not allowed to operate database: " + dbName); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java index 16afbcecdaea5c..ffd411d0cf3ea1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java @@ -127,8 +127,14 @@ public static void writeUnlockTables(List tableList) { } public static void commitLockTables(List
tableList) {
-        for (Table table : tableList) {
-            table.commitLock();
+        for (int i = 0; i < tableList.size(); i++) {
+            try {
+                tableList.get(i).commitLock();
+            } catch (Exception e) {
+                // roll back the locks already acquired, then propagate the failure
+                for (int j = i - 1; j >= 0; j--) {
+                    tableList.get(j).commitUnlock();
+                }
+                throw e;
+            }
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java
index eff741e408008d..c6fc3307fe0b29 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java
@@ -159,6 +159,7 @@ public static ProfileManager getInstance() {
     // The visiablity of ProfileManager() is package level, so that we can write ut for it.
     ProfileManager() {
+        super("profile-manager", Config.profile_manager_gc_interval_seconds * 1000);
         lock = new ReentrantReadWriteLock(true);
         readLock = lock.readLock();
         writeLock = lock.writeLock();
@@ -686,7 +687,7 @@ private void writeProfileToStorage() {
                 for (ExecutionProfile executionProfile : profileElement.profile.getExecutionProfiles()) {
                     this.queryIdToExecutionProfiles.remove(executionProfile.getQueryId());
                 }
-                profileElement.profile.releaseExecutionProfile();
+                profileElement.profile.releaseMemory();
             }
         } finally {
             writeLock.unlock();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index 62c643df5c69fa..db387ffc9d5434 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -35,7 +35,6 @@
 import org.apache.doris.common.Config;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.datasource.CatalogIf;
-import org.apache.doris.datasource.CatalogMgr;
 import org.apache.doris.datasource.ExternalCatalog;
 import org.apache.doris.policy.Policy;
 import org.apache.doris.policy.StoragePolicy;
@@ -83,6 +82,7 @@ public class PropertyAnalyzer {
     public static final String PROPERTIES_SCHEMA_VERSION = "schema_version";
     public static final String PROPERTIES_PARTITION_ID = "partition_id";
     public static final String PROPERTIES_VISIBLE_VERSION = "visible_version";
+    public static final String PROPERTIES_IN_ATOMIC_RESTORE = "in_atomic_restore";
     public static final String PROPERTIES_BF_COLUMNS = "bloom_filter_columns";
     public static final String PROPERTIES_BF_FPP = "bloom_filter_fpp";
@@ -1559,16 +1559,6 @@ public static void checkCatalogProperties(Map properties, boolea
         // "access_controller.properties.prop2" = "yyy",
         // )
         // 1. get access controller class
-        String acClass = properties.getOrDefault(CatalogMgr.ACCESS_CONTROLLER_CLASS_PROP, "");
-        if (!Strings.isNullOrEmpty(acClass)) {
-            // 2.
check if class exists - try { - Class.forName(acClass); - } catch (ClassNotFoundException e) { - throw new AnalysisException("failed to find class " + acClass, e); - } - } - if (isAlter) { // The 'use_meta_cache' property can not be modified if (properties.containsKey(ExternalCatalog.USE_META_CACHE)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java index 8759f9f5ef2f04..cc40ad292ce182 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalMetaCacheMgr.java @@ -101,7 +101,7 @@ public ExternalMetaCacheMgr() { commonRefreshExecutor = ThreadPoolManager.newDaemonFixedThreadPool( Config.max_external_cache_loader_thread_pool_size, - Config.max_external_cache_loader_thread_pool_size * 1000, + Config.max_external_cache_loader_thread_pool_size * 10000, "CommonRefreshExecutor", 10, true); // The queue size should be large enough, diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/FileQueryScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/FileQueryScanNode.java index 588ea57289ae3a..204a0ca440fe49 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/FileQueryScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/FileQueryScanNode.java @@ -111,6 +111,7 @@ public void init(Analyzer analyzer) throws UserException { ConnectContext.get().getExecutor().getSummaryProfile().setInitScanNodeStartTime(); } super.init(analyzer); + initFileSplitSize(); doInitialize(); if (ConnectContext.get().getExecutor() != null) { ConnectContext.get().getExecutor().getSummaryProfile().setInitScanNodeFinishTime(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/FileScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/FileScanNode.java index efb6169e19074e..5d47f3eac733b8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/FileScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/FileScanNode.java @@ -82,7 +82,7 @@ public void init() throws UserException { initFileSplitSize(); } - private void initFileSplitSize() { + protected void initFileSplitSize() { this.fileSplitSize = ConnectContext.get().getSessionVariable().getFileSplitSize(); this.isSplitSizeSetBySession = this.fileSplitSize > 0; if (this.fileSplitSize <= 0) { @@ -126,6 +126,7 @@ public String getNodeExplainString(String prefix, TExplainLevel detailLevel) { output.append(prefix); if (isBatchMode()) { output.append("(approximate)"); + splitAssignment.stop(); } output.append("inputSplitNum=").append(selectedSplitNum).append(", totalFileSize=") .append(totalFileSize).append(", scanRanges=").append(scanRangeLocations.size()).append("\n"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java index bf19bdc37c190c..36ea8ff80f9c08 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java @@ -59,6 +59,7 @@ import org.apache.doris.analysis.TableRef; import org.apache.doris.analysis.TruncateTableStmt; import org.apache.doris.analysis.TypeDef; +import org.apache.doris.backup.RestoreJob; import org.apache.doris.catalog.BinlogConfig; import org.apache.doris.catalog.BrokerTable; import 
org.apache.doris.catalog.ColocateGroupSchema; @@ -928,10 +929,16 @@ public void dropTable(DropTableStmt stmt) throws DdlException { OlapTable olapTable = (OlapTable) table; if ((olapTable.getState() != OlapTableState.NORMAL)) { throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState() - + ", cannot be dropped." + " please cancel the operation on olap table firstly." + + ", cannot be dropped. please cancel the operation on olap table firstly." + " If you want to forcibly drop(cannot be recovered)," + " please use \"DROP table FORCE\"."); } + if (olapTable.isInAtomicRestore()) { + throw new DdlException("The table [" + tableName + "]'s state is in atomic restore" + + ", cannot be dropped. please cancel the restore operation on olap table" + + " firstly. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP table FORCE\"."); + } } dropTableInternal(db, table, stmt.isForceDrop(), watch, costTimes); @@ -1226,6 +1233,11 @@ public boolean createTable(CreateTableStmt stmt) throws UserException { ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); } } + if (db.getTable(RestoreJob.tableAliasWithAtomicRestore(tableName)).isPresent()) { + ErrorReport.reportDdlException( + "table[{}] is in atomic restore, please cancel the restore operation firstly", + ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); + } if (engineName.equals("olap")) { return createOlapTable(db, stmt); @@ -1758,8 +1770,7 @@ public PartitionPersistInfo addPartition(Database db, String tableName, AddParti singlePartitionDesc.isInMemory(), singlePartitionDesc.getTabletType(), storagePolicy, idGeneratorBuffer, - binlogConfig, dataProperty.isStorageMediumSpecified(), null); - // TODO cluster key ids + binlogConfig, dataProperty.isStorageMediumSpecified()); // check again olapTable = db.getOlapTableOrDdlException(tableName); @@ -2003,7 +2014,7 @@ public void dropPartitionWithoutCheck(Database db, OlapTable olapTable, String p // it does not affect the logic of deleting the partition try { Env.getCurrentEnv().getEventProcessor().processEvent( - new DropPartitionEvent(db.getCatalog().getId(), db.getId(), olapTable.getId())); + new DropPartitionEvent(db.getCatalog().getId(), db.getId(), olapTable.getId(), isTempPartition)); } catch (Throwable t) { // According to normal logic, no exceptions will be thrown, // but in order to avoid bugs affecting the original logic, all exceptions are caught @@ -2074,8 +2085,7 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa String storagePolicy, IdGeneratorBuffer idGeneratorBuffer, BinlogConfig binlogConfig, - boolean isStorageMediumSpecified, - List clusterKeyIndexes) + boolean isStorageMediumSpecified) throws DdlException { // create base index first. Preconditions.checkArgument(tbl.getBaseIndexId() != -1); @@ -2133,6 +2143,11 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa short shortKeyColumnCount = indexMeta.getShortKeyColumnCount(); TStorageType storageType = indexMeta.getStorageType(); List schema = indexMeta.getSchema(); + List clusterKeyIndexes = null; + if (indexId == tbl.getBaseIndexId()) { + // only base and shadow index need cluster key indexes + clusterKeyIndexes = OlapTable.getClusterKeyIndexes(schema); + } KeysType keysType = indexMeta.getKeysType(); List indexes = indexId == tbl.getBaseIndexId() ? 
tbl.getCopiedIndexes() : null; int totalTaskNum = index.getTablets().size() * totalReplicaNum; @@ -2164,7 +2179,11 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa task.setStorageFormat(tbl.getStorageFormat()); task.setInvertedIndexFileStorageFormat(tbl.getInvertedIndexFileStorageFormat()); - task.setClusterKeyIndexes(clusterKeyIndexes); + if (!CollectionUtils.isEmpty(clusterKeyIndexes)) { + task.setClusterKeyIndexes(clusterKeyIndexes); + LOG.info("table: {}, partition: {}, index: {}, tablet: {}, cluster key indexes: {}", + tbl.getId(), partitionId, indexId, tabletId, clusterKeyIndexes); + } batchTask.addTask(task); // add to AgentTaskQueue for handling finish report. // not for resending task @@ -2507,6 +2526,16 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx // use light schema change optimization olapTable.setEnableLightSchemaChange(enableLightSchemaChange); + // check if light schema change is disabled, variant type rely on light schema change + if (!enableLightSchemaChange) { + for (Column column : baseSchema) { + if (column.getType().isVariantType()) { + throw new DdlException("Variant type rely on light schema change, " + + " please use light_schema_change = true."); + } + } + } + boolean disableAutoCompaction = false; try { disableAutoCompaction = PropertyAnalyzer.analyzeDisableAutoCompaction(properties); @@ -2637,8 +2666,8 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx olapTable.setRowStorePageSize(rowStorePageSize); // check data sort properties - int keyColumnSize = CollectionUtils.isEmpty(keysDesc.getClusterKeysColumnIds()) ? keysDesc.keysColumnSize() : - keysDesc.getClusterKeysColumnIds().size(); + int keyColumnSize = CollectionUtils.isEmpty(keysDesc.getClusterKeysColumnNames()) ? 
keysDesc.keysColumnSize() : + keysDesc.getClusterKeysColumnNames().size(); DataSortInfo dataSortInfo = PropertyAnalyzer.analyzeDataSortInfo(properties, keysType, keyColumnSize, storageFormat); olapTable.setDataSortInfo(dataSortInfo); @@ -2650,6 +2679,10 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } + if (enableUniqueKeyMergeOnWrite && !enableLightSchemaChange && !CollectionUtils.isEmpty( + keysDesc.getClusterKeysColumnNames())) { + throw new DdlException("Unique merge-on-write table with cluster keys must enable light schema change"); + } } olapTable.setEnableUniqueKeyMergeOnWrite(enableUniqueKeyMergeOnWrite); @@ -2958,7 +2991,7 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx throw new DdlException("Sequence type only support integer types and date types"); } olapTable.setSequenceMapCol(col.getName()); - olapTable.setSequenceInfo(col.getType()); + olapTable.setSequenceInfo(col.getType(), col); } } catch (Exception e) { throw new DdlException(e.getMessage()); @@ -2972,24 +3005,21 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx throw new DdlException("The sequence_col and sequence_type cannot be set at the same time"); } if (sequenceColType != null) { - olapTable.setSequenceInfo(sequenceColType); + olapTable.setSequenceInfo(sequenceColType, null); } } catch (Exception e) { throw new DdlException(e.getMessage()); } - // analyse group commit interval ms - int groupCommitIntervalMs; try { - groupCommitIntervalMs = PropertyAnalyzer.analyzeGroupCommitIntervalMs(properties); + int groupCommitIntervalMs = PropertyAnalyzer.analyzeGroupCommitIntervalMs(properties); olapTable.setGroupCommitIntervalMs(groupCommitIntervalMs); } catch (Exception e) { throw new DdlException(e.getMessage()); } - int groupCommitDataBytes; try { - groupCommitDataBytes = PropertyAnalyzer.analyzeGroupCommitDataBytes(properties); + int groupCommitDataBytes = PropertyAnalyzer.analyzeGroupCommitDataBytes(properties); olapTable.setGroupCommitDataBytes(groupCommitDataBytes); } catch (Exception e) { throw new DdlException(e.getMessage()); @@ -3045,8 +3075,7 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx storagePolicy, idGeneratorBuffer, binlogConfigForTask, - partitionInfo.getDataProperty(partitionId).isStorageMediumSpecified(), - keysDesc.getClusterKeysColumnIds()); + partitionInfo.getDataProperty(partitionId).isStorageMediumSpecified()); afterCreatePartitions(db.getId(), olapTable.getId(), null, olapTable.getIndexIdList(), true); olapTable.addPartition(partition); @@ -3130,8 +3159,7 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx partitionInfo.getTabletType(entry.getValue()), partionStoragePolicy, idGeneratorBuffer, binlogConfigForTask, - dataProperty.isStorageMediumSpecified(), - keysDesc.getClusterKeysColumnIds()); + dataProperty.isStorageMediumSpecified()); olapTable.addPartition(partition); olapTable.getPartitionInfo().getDataProperty(partition.getId()) .setStoragePolicy(partionStoragePolicy); @@ -3554,14 +3582,6 @@ public void truncateTable(TruncateTableStmt truncateTableStmt) throws DdlExcepti Env.getCurrentInvertedIndex().deleteTablet(tabletId); } }; - Map clusterKeyMap = new TreeMap<>(); - for (int i = 0; i < olapTable.getBaseSchema().size(); i++) { - Column column = olapTable.getBaseSchema().get(i); - if (column.getClusterKeyId() != -1) { - 
clusterKeyMap.put(column.getClusterKeyId(), i); - } - } - List clusterKeyIdxes = clusterKeyMap.values().stream().collect(Collectors.toList()); try { long bufferSize = IdGeneratorUtil.getBufferSizeForTruncateTable(copiedTbl, origPartitions.values()); IdGeneratorBuffer idGeneratorBuffer = @@ -3597,8 +3617,7 @@ public void truncateTable(TruncateTableStmt truncateTableStmt) throws DdlExcepti copiedTbl.getPartitionInfo().getTabletType(oldPartitionId), olapTable.getPartitionInfo().getDataProperty(oldPartitionId).getStoragePolicy(), idGeneratorBuffer, binlogConfig, - copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId).isStorageMediumSpecified(), - clusterKeyIdxes); + copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId).isStorageMediumSpecified()); newPartitions.add(newPartition); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java index 8217f1c3a367df..1c1a28242f45fa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java @@ -603,9 +603,8 @@ private Optional getHiveColumnStats(String colName) { if (!parameters.containsKey(NUM_ROWS) || Long.parseLong(parameters.get(NUM_ROWS)) == 0) { return Optional.empty(); } - ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(); long count = Long.parseLong(parameters.get(NUM_ROWS)); - columnStatisticBuilder.setCount(count); + ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(count); // The tableStats length is at most 1. for (ColumnStatisticsObj tableStat : tableStats) { if (!tableStat.isSetStatsData()) { @@ -760,26 +759,26 @@ public MTMVSnapshotIf getTableSnapshot(MTMVRefreshContext context) throws Analys if (getPartitionType() == PartitionType.UNPARTITIONED) { return new MTMVMaxTimestampSnapshot(getName(), getLastDdlTime()); } - Long maxPartitionId = 0L; + HivePartition maxPartition = null; long maxVersionTime = 0L; long visibleVersionTime; HiveMetaStoreCache cache = Env.getCurrentEnv().getExtMetaCacheMgr() .getMetaStoreCache((HMSExternalCatalog) getCatalog()); HiveMetaStoreCache.HivePartitionValues hivePartitionValues = cache.getPartitionValues( getDbName(), getName(), getPartitionColumnTypes()); - BiMap idToName = hivePartitionValues.getPartitionNameToIdMap().inverse(); - if (MapUtils.isEmpty(idToName)) { - throw new AnalysisException("partitions is empty for : " + getName()); + List partitionList = cache.getAllPartitionsWithCache(getDbName(), getName(), + Lists.newArrayList(hivePartitionValues.getPartitionValuesMap().values())); + if (CollectionUtils.isEmpty(partitionList)) { + throw new AnalysisException("partitionList is empty, table name: " + getName()); } - for (Long partitionId : idToName.keySet()) { - visibleVersionTime = getHivePartitionByIdOrAnalysisException(partitionId, hivePartitionValues, - cache).getLastModifiedTime(); + for (HivePartition hivePartition : partitionList) { + visibleVersionTime = hivePartition.getLastModifiedTime(); if (visibleVersionTime > maxVersionTime) { maxVersionTime = visibleVersionTime; - maxPartitionId = partitionId; + maxPartition = hivePartition; } } - return new MTMVMaxTimestampSnapshot(idToName.get(maxPartitionId), maxVersionTime); + return new MTMVMaxTimestampSnapshot(maxPartition.getPartitionName(getPartitionColumns()), maxVersionTime); } private Long getPartitionIdByNameOrAnalysisException(String 
partitionName, diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java index 3483ca155ba6ae..fc275c2871ee9a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HiveMetaStoreCache.java @@ -149,7 +149,7 @@ private void init() { CacheFactory partitionCacheFactory = new CacheFactory( OptionalLong.of(28800L), - OptionalLong.of(Config.external_cache_expire_time_minutes_after_access * 60L), + OptionalLong.empty(), Config.max_hive_partition_cache_num, true, null); @@ -487,7 +487,8 @@ public List getFilesByPartitions(List partitions, List keys = partitions.stream().map(p -> p.isDummyPartition() ? FileCacheKey.createDummyCacheKey( p.getDbName(), p.getTblName(), p.getPath(), p.getInputFormat(), bindBrokerName) - : new FileCacheKey(p.getPath(), p.getInputFormat(), p.getPartitionValues(), bindBrokerName)) + : new FileCacheKey(p.getDbName(), p.getTblName(), p.getPath(), + p.getInputFormat(), p.getPartitionValues(), bindBrokerName)) .collect(Collectors.toList()); List fileLists; @@ -559,38 +560,19 @@ private List getAllPartitions(String dbName, String name, List
  • values : partitionValues.partitionValuesMap.values()) { - PartitionCacheKey partKey = new PartitionCacheKey(dbName, tblName, values); - HivePartition partition = partitionCache.getIfPresent(partKey); - if (partition != null) { - fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(), - null, partition.getPartitionValues(), null)); - partitionCache.invalidate(partKey); - } + partitionValuesCache.invalidate(new PartitionValueCacheKey(dbName, tblName, null)); + partitionCache.asMap().keySet().forEach(k -> { + if (k.isSameTable(dbName, tblName)) { + partitionCache.invalidate(k); } - partitionValuesCache.invalidate(key); - if (LOG.isDebugEnabled()) { - LOG.debug("invalid table cache for {}.{} in catalog {}, cache num: {}, cost: {} ms", - dbName, tblName, catalog.getName(), partitionValues.partitionValuesMap.size(), - (System.currentTimeMillis() - start)); + }); + long id = Util.genIdByName(dbName, tblName); + LoadingCache fileCache = fileCacheRef.get(); + fileCache.asMap().keySet().forEach(k -> { + if (k.isSameTable(id)) { + fileCache.invalidate(k); } - } else { - /** - * A file cache entry can be created reference to - * {@link org.apache.doris.planner.external.HiveSplitter#getSplits}, - * so we need to invalidate it if this is a non-partitioned table. - * We use {@link org.apache.doris.datasource.hive.HiveMetaStoreCache.FileCacheKey#createDummyCacheKey} - * to avoid invocation by Hms Client, because this method may be invoked when salve FE replay journal logs, - * and FE will exit if some network problems occur. - * */ - FileCacheKey fileCacheKey = FileCacheKey.createDummyCacheKey( - dbName, tblName, null, null, null); - fileCacheRef.get().invalidate(fileCacheKey); - } + }); } public void invalidatePartitionCache(String dbName, String tblName, String partitionName) { @@ -602,7 +584,7 @@ public void invalidatePartitionCache(String dbName, String tblName, String parti PartitionCacheKey partKey = new PartitionCacheKey(dbName, tblName, values); HivePartition partition = partitionCache.getIfPresent(partKey); if (partition != null) { - fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(), + fileCacheRef.get().invalidate(new FileCacheKey(dbName, tblName, partition.getPath(), null, partition.getPartitionValues(), null)); partitionCache.invalidate(partKey); } @@ -746,10 +728,21 @@ public void putPartitionValuesCacheForTest(PartitionValueCacheKey key, HiveParti * get fileCache ref * @return */ + @VisibleForTesting public AtomicReference> getFileCacheRef() { return fileCacheRef; } + @VisibleForTesting + public LoadingCache getPartitionValuesCache() { + return partitionValuesCache; + } + + @VisibleForTesting + public LoadingCache getPartitionCache() { + return partitionCache; + } + public List getFilesByTransaction(List partitions, ValidWriteIdList validWriteIds, boolean isFullAcid, long tableId, String bindBrokerName) { List fileCacheValues = Lists.newArrayList(); @@ -931,6 +924,10 @@ public boolean equals(Object obj) { && Objects.equals(values, ((PartitionCacheKey) obj).values); } + boolean isSameTable(String dbName, String tblName) { + return this.dbName.equals(dbName) && this.tblName.equals(tblName); + } + @Override public int hashCode() { return Objects.hash(dbName, tblName, values); @@ -955,18 +952,21 @@ public static class FileCacheKey { // e.g for file : hdfs://path/to/table/part1=a/part2=b/datafile // partitionValues would be ["part1", "part2"] protected List partitionValues; + private long id; - public FileCacheKey(String location, String inputFormat, List 
partitionValues, String bindBrokerName) { + public FileCacheKey(String dbName, String tblName, String location, String inputFormat, + List partitionValues, String bindBrokerName) { this.location = location; this.inputFormat = inputFormat; this.partitionValues = partitionValues == null ? Lists.newArrayList() : partitionValues; this.bindBrokerName = bindBrokerName; + this.id = Util.genIdByName(dbName, tblName); } public static FileCacheKey createDummyCacheKey(String dbName, String tblName, String location, String inputFormat, String bindBrokerName) { - FileCacheKey fileCacheKey = new FileCacheKey(location, inputFormat, null, bindBrokerName); + FileCacheKey fileCacheKey = new FileCacheKey(dbName, tblName, location, inputFormat, null, bindBrokerName); fileCacheKey.dummyKey = dbName + "." + tblName; return fileCacheKey; } @@ -986,6 +986,10 @@ public boolean equals(Object obj) { && Objects.equals(partitionValues, ((FileCacheKey) obj).partitionValues); } + boolean isSameTable(long id) { + return this.id == id; + } + @Override public int hashCode() { if (dummyKey != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java index db4161a4e237e3..634c596c69f63f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/source/HiveScanNode.java @@ -257,30 +257,32 @@ public void startSplit() { } try { splittersOnFlight.acquire(); - } catch (InterruptedException e) { + CompletableFuture.runAsync(() -> { + try { + List allFiles = Lists.newArrayList(); + getFileSplitByPartitions( + cache, Collections.singletonList(partition), allFiles, bindBrokerName); + if (allFiles.size() > numSplitsPerPartition.get()) { + numSplitsPerPartition.set(allFiles.size()); + } + splitAssignment.addToQueue(allFiles); + } catch (IOException e) { + batchException.set(new UserException(e.getMessage(), e)); + } finally { + splittersOnFlight.release(); + if (batchException.get() != null) { + splitAssignment.setException(batchException.get()); + } + if (numFinishedPartitions.incrementAndGet() == prunedPartitions.size()) { + splitAssignment.finishSchedule(); + } + } + }, scheduleExecutor); + } catch (Exception e) { + // When submitting a task, an exception will be thrown if the task pool(scheduleExecutor) is full batchException.set(new UserException(e.getMessage(), e)); break; } - CompletableFuture.runAsync(() -> { - try { - List allFiles = Lists.newArrayList(); - getFileSplitByPartitions(cache, Collections.singletonList(partition), allFiles, bindBrokerName); - if (allFiles.size() > numSplitsPerPartition.get()) { - numSplitsPerPartition.set(allFiles.size()); - } - splitAssignment.addToQueue(allFiles); - } catch (IOException e) { - batchException.set(new UserException(e.getMessage(), e)); - } finally { - splittersOnFlight.release(); - if (batchException.get() != null) { - splitAssignment.setException(batchException.get()); - } - if (numFinishedPartitions.incrementAndGet() == prunedPartitions.size()) { - splitAssignment.finishSchedule(); - } - } - }, scheduleExecutor); } if (batchException.get() != null) { splitAssignment.setException(batchException.get()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java index cd64efcd80f2cd..155afea1217064 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java @@ -98,6 +98,7 @@ public static JdbcClient createJdbcClient(JdbcClientConfig jdbcClientConfig) { } protected JdbcClient(JdbcClientConfig jdbcClientConfig) { + System.setProperty("com.zaxxer.hikari.useWeakReferences", "true"); this.catalogName = jdbcClientConfig.getCatalog(); this.jdbcUser = jdbcClientConfig.getUser(); this.isOnlySpecifiedDatabase = Boolean.parseBoolean(jdbcClientConfig.getOnlySpecifiedDatabase()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/MaxComputeExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/MaxComputeExternalCatalog.java index fcbc0a5e8fc0a3..cfcf4331b96a97 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/MaxComputeExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/MaxComputeExternalCatalog.java @@ -29,17 +29,12 @@ import com.aliyun.odps.Odps; import com.aliyun.odps.OdpsException; import com.aliyun.odps.Partition; -import com.aliyun.odps.Project; import com.aliyun.odps.account.Account; import com.aliyun.odps.account.AliyunAccount; -import com.aliyun.odps.security.SecurityManager; import com.aliyun.odps.table.configuration.SplitOptions; import com.aliyun.odps.table.enviroment.Credentials; import com.aliyun.odps.table.enviroment.EnvironmentSettings; -import com.aliyun.odps.utils.StringUtils; import com.google.common.collect.ImmutableList; -import com.google.gson.JsonObject; -import com.google.gson.JsonParser; import java.util.ArrayList; import java.util.Iterator; @@ -52,7 +47,6 @@ public class MaxComputeExternalCatalog extends ExternalCatalog { private String accessKey; private String secretKey; private String endpoint; - private String catalogOwner; private String defaultProject; private String quota; private EnvironmentSettings settings; @@ -128,25 +122,28 @@ public Odps getClient() { protected List listDatabaseNames() { List result = new ArrayList<>(); - try { - result.add(defaultProject); - if (StringUtils.isNullOrEmpty(catalogOwner)) { - SecurityManager sm = odps.projects().get().getSecurityManager(); - String whoami = sm.runQuery("whoami", false); - - JsonObject js = JsonParser.parseString(whoami).getAsJsonObject(); - catalogOwner = js.get("DisplayName").getAsString(); - } - Iterator iterator = odps.projects().iterator(catalogOwner); - while (iterator.hasNext()) { - Project project = iterator.next(); - if (!project.getName().equals(defaultProject)) { - result.add(project.getName()); - } - } - } catch (OdpsException e) { - throw new RuntimeException(e); - } + result.add(defaultProject); + + // TODO: Improve `show tables` and `select * from table` when `use other project`. 
+ // try { + // result.add(defaultProject); + // if (StringUtils.isNullOrEmpty(catalogOwner)) { + // SecurityManager sm = odps.projects().get().getSecurityManager(); + // String whoami = sm.runQuery("whoami", false); + // + // JsonObject js = JsonParser.parseString(whoami).getAsJsonObject(); + // catalogOwner = js.get("DisplayName").getAsString(); + // } + // Iterator iterator = odps.projects().iterator(catalogOwner); + // while (iterator.hasNext()) { + // Project project = iterator.next(); + // if (!project.getName().equals(defaultProject)) { + // result.add(project.getName()); + // } + // } + // } catch (OdpsException e) { + // throw new RuntimeException(e); + // } return result; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/source/MaxComputeScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/source/MaxComputeScanNode.java index 521757da20e2ea..24e8e5ec4e2db9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/source/MaxComputeScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/maxcompute/source/MaxComputeScanNode.java @@ -112,7 +112,6 @@ private void setScanParams(TFileRangeDesc rangeDesc, MaxComputeSplit maxComputeS void createTableBatchReadSession() throws UserException { Predicate filterPredicate = convertPredicate(); - List requiredPartitionColumns = new ArrayList<>(); List orderedRequiredDataColumns = new ArrayList<>(); @@ -165,31 +164,30 @@ protected Predicate convertPredicate() { return Predicate.NO_PREDICATE; } - if (conjuncts.size() == 1) { + List odpsPredicates = new ArrayList<>(); + for (Expr dorisPredicate : conjuncts) { try { - return convertExprToOdpsPredicate(conjuncts.get(0)); + odpsPredicates.add(convertExprToOdpsPredicate(dorisPredicate)); } catch (AnalysisException e) { - Log.info("Failed to convert predicate " + conjuncts.get(0) + " to odps predicate"); + Log.info("Failed to convert predicate " + dorisPredicate); Log.info("Reason: " + e.getMessage()); - return Predicate.NO_PREDICATE; } } - com.aliyun.odps.table.optimizer.predicate.CompoundPredicate - filterPredicate = new com.aliyun.odps.table.optimizer.predicate.CompoundPredicate( - com.aliyun.odps.table.optimizer.predicate.CompoundPredicate.Operator.AND - ); - - for (Expr predicate : conjuncts) { - try { - filterPredicate.addPredicate(convertExprToOdpsPredicate(predicate)); - } catch (AnalysisException e) { - Log.info("Failed to convert predicate " + predicate); - Log.info("Reason: " + e.getMessage()); - return Predicate.NO_PREDICATE; + if (odpsPredicates.isEmpty()) { + return Predicate.NO_PREDICATE; + } else if (odpsPredicates.size() == 1) { + return odpsPredicates.get(0); + } else { + com.aliyun.odps.table.optimizer.predicate.CompoundPredicate + filterPredicate = new com.aliyun.odps.table.optimizer.predicate.CompoundPredicate( + com.aliyun.odps.table.optimizer.predicate.CompoundPredicate.Operator.AND); + + for (Predicate odpsPredicate : odpsPredicates) { + filterPredicate.addPredicate(odpsPredicate); } + return filterPredicate; } - return filterPredicate; } private Predicate convertExprToOdpsPredicate(Expr expr) throws AnalysisException { @@ -225,7 +223,7 @@ private Predicate convertExprToOdpsPredicate(Expr expr) throws AnalysisException InPredicate inPredicate = (InPredicate) expr; if (inPredicate.getChildren().size() > 2) { - return Predicate.NO_PREDICATE; + throw new AnalysisException("InPredicate must contain at most 1 children"); } com.aliyun.odps.table.optimizer.predicate.InPredicate.Operator odpsOp = 
inPredicate.isNotIn() @@ -335,7 +333,6 @@ private String convertSlotRefToColumnName(Expr expr) throws AnalysisException { throw new AnalysisException("Do not support convert [" + expr.getExprName() + "] in convertSlotRefToAttribute."); - } private String convertLiteralToOdpsValues(OdpsType odpsType, Expr expr) throws AnalysisException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java index fc76ddb0eeb4f3..e0d0f9a3ea2cd0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonScanNode.java @@ -22,6 +22,7 @@ import org.apache.doris.common.DdlException; import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.common.UserException; +import org.apache.doris.common.util.FileFormatUtils; import org.apache.doris.common.util.LocationPath; import org.apache.doris.datasource.FileQueryScanNode; import org.apache.doris.datasource.paimon.PaimonExternalCatalog; @@ -143,7 +144,7 @@ private void setPaimonParams(TFileRangeDesc rangeDesc, PaimonSplit paimonSplit) // use jni reader fileDesc.setPaimonSplit(encodeObjectToString(split)); } - fileDesc.setFileFormat(source.getFileFormat()); + fileDesc.setFileFormat(getFileFormat(paimonSplit.getPathString())); fileDesc.setPaimonPredicate(encodeObjectToString(predicates)); fileDesc.setPaimonColumnNames(source.getDesc().getSlots().stream().map(slot -> slot.getColumn().getName()) .collect(Collectors.joining(","))); @@ -180,19 +181,18 @@ public List getSplits() throws UserException { List paimonSplits = readBuilder.withFilter(predicates) .withProjection(projected) .newScan().plan().splits(); - boolean supportNative = supportNativeReader(); // Just for counting the number of selected partitions for this paimon table Set selectedPartitionValues = Sets.newHashSet(); for (org.apache.paimon.table.source.Split split : paimonSplits) { SplitStat splitStat = new SplitStat(); splitStat.setRowCount(split.rowCount()); - if (!forceJniScanner && supportNative && split instanceof DataSplit) { + if (!forceJniScanner && split instanceof DataSplit) { DataSplit dataSplit = (DataSplit) split; BinaryRow partitionValue = dataSplit.partition(); selectedPartitionValues.add(partitionValue); Optional> optRawFiles = dataSplit.convertToRawFiles(); Optional> optDeletionFiles = dataSplit.deletionFiles(); - if (optRawFiles.isPresent()) { + if (supportNativeReader(optRawFiles)) { splitStat.setType(SplitReadType.NATIVE); splitStat.setRawFileConvertable(true); List rawFiles = optRawFiles.get(); @@ -262,15 +262,22 @@ public List getSplits() throws UserException { return splits; } - private boolean supportNativeReader() { - String fileFormat = source.getFileFormat().toLowerCase(); - switch (fileFormat) { - case "orc": - case "parquet": - return true; - default: + private String getFileFormat(String path) { + return FileFormatUtils.getFileFormatBySuffix(path).orElse(source.getFileFormatFromTableProperties()); + } + + private boolean supportNativeReader(Optional> optRawFiles) { + if (!optRawFiles.isPresent()) { + return false; + } + List files = optRawFiles.get().stream().map(RawFile::path).collect(Collectors.toList()); + for (String f : files) { + String splitFileFormat = getFileFormat(f); + if (!splitFileFormat.equals("orc") && !splitFileFormat.equals("parquet")) { return false; + } } + return true; } @Override diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonSource.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonSource.java index f731a99d8e51f1..885eba06ed956d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonSource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/source/PaimonSource.java @@ -59,7 +59,7 @@ public ExternalCatalog getCatalog() { return paimonExtTable.getCatalog(); } - public String getFileFormat() { - return originTable.options().getOrDefault(PaimonProperties.FILE_FORMAT, "orc"); + public String getFileFormatFromTableProperties() { + return originTable.options().getOrDefault(PaimonProperties.FILE_FORMAT, "parquet"); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/event/DropPartitionEvent.java b/fe/fe-core/src/main/java/org/apache/doris/event/DropPartitionEvent.java index 598768aa8de724..cea03e7ef79a80 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/event/DropPartitionEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/event/DropPartitionEvent.java @@ -20,7 +20,14 @@ import org.apache.doris.common.AnalysisException; public class DropPartitionEvent extends TableEvent { - public DropPartitionEvent(long ctlId, long dbId, long tableId) throws AnalysisException { + private boolean isTempPartition; + + public DropPartitionEvent(long ctlId, long dbId, long tableId, boolean isTempPartition) throws AnalysisException { super(EventType.DROP_PARTITION, ctlId, dbId, tableId); + this.isTempPartition = isTempPartition; + } + + public boolean isTempPartition() { + return isTempPartition; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java index 087511654815a4..d751f72f719f61 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/controller/HardwareInfoController.java @@ -20,6 +20,8 @@ import org.apache.doris.common.Version; import org.apache.doris.httpv2.entity.ResponseEntityBuilder; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RestController; @@ -51,6 +53,8 @@ @RequestMapping("/rest/v1") public class HardwareInfoController { + private static final Logger LOG = LogManager.getLogger(HardwareInfoController.class); + @RequestMapping(path = "/hardware_info/fe", method = RequestMethod.GET) public Object index() { Map> map = new HashMap<>(); @@ -69,136 +73,163 @@ private void appendVersionInfo(Map> content) { } private void appendHardwareInfo(Map> content) { - SystemInfo si = new SystemInfo(); - OperatingSystem os = si.getOperatingSystem(); - HardwareAbstractionLayer hal = si.getHardware(); - CentralProcessor processor = hal.getProcessor(); - GlobalMemory memory = hal.getMemory(); Map map = new HashMap<>(); - map.put("OS", String.join("
    ", getOperatingSystem(os))); - map.put("Processor", String.join("
    ", getProcessor(processor))); - map.put("Memory", String.join("
    ", getMemory(memory))); - map.put("Processes", String.join("
    ", getProcesses(os, memory))); - map.put("Disk", String.join("
    ", getDisks(hal.getDiskStores()))); - map.put("FileSystem", String.join("
    ", getFileSystem(os.getFileSystem()))); - map.put("NetworkInterface", String.join("
    ", getNetworkInterfaces(hal.getNetworkIFs()))); - map.put("NetworkParameter", String.join("
    ", getNetworkParameters(os.getNetworkParams()))); + try { + SystemInfo si = new SystemInfo(); + OperatingSystem os = si.getOperatingSystem(); + HardwareAbstractionLayer hal = si.getHardware(); + CentralProcessor processor = hal.getProcessor(); + GlobalMemory memory = hal.getMemory(); + + map.put("OS", String.join("
    ", getOperatingSystem(os))); + map.put("Processor", String.join("
    ", getProcessor(processor))); + map.put("Memory", String.join("
    ", getMemory(memory))); + map.put("Processes", String.join("
    ", getProcesses(os, memory))); + map.put("Disk", String.join("
    ", getDisks(hal.getDiskStores()))); + map.put("FileSystem", String.join("
    ", getFileSystem(os.getFileSystem()))); + map.put("NetworkInterface", String.join("
    ", getNetworkInterfaces(hal.getNetworkIFs()))); + map.put("NetworkParameter", String.join("
    ", getNetworkParameters(os.getNetworkParams()))); + } catch (Exception e) { + // If we can't get hardware info, we should not throw exception + // don't use log.warn + LOG.info("Failed to get hardware info", e); + } content.put("HardwareInfo", map); + } private List getOperatingSystem(OperatingSystem os) { - List osInfo = new ArrayList<>(); - osInfo.add(String.valueOf(os)); - osInfo.add("Booted: " + Instant.ofEpochSecond(os.getSystemBootTime())); - osInfo.add("Uptime: " + FormatUtil.formatElapsedSecs(os.getSystemUptime())); - osInfo.add("Running with" + (os.isElevated() ? "" : "out") + " elevated permissions."); - return osInfo; + try { + List osInfo = new ArrayList<>(); + osInfo.add(String.valueOf(os)); + osInfo.add("Booted: " + Instant.ofEpochSecond(os.getSystemBootTime())); + osInfo.add("Uptime: " + FormatUtil.formatElapsedSecs(os.getSystemUptime())); + osInfo.add("Running with" + (os.isElevated() ? "" : "out") + " elevated permissions."); + return osInfo; + } catch (Exception e) { + LOG.info("Failed to get operating system info", e); + } + return new ArrayList<>(); } private List getProcessor(CentralProcessor processor) { List processorInfo = new ArrayList<>(); - processorInfo.add(String.valueOf(processor)); - processorInfo.add(" " + processor.getPhysicalPackageCount() + " physical CPU package(s)"); - processorInfo.add(" " + processor.getPhysicalProcessorCount() + " physical CPU core(s)"); - processorInfo.add(" " + processor.getLogicalProcessorCount() + " logical CPU(s)"); + try { + processorInfo.add(String.valueOf(processor)); + processorInfo.add(" " + processor.getPhysicalPackageCount() + " physical CPU package(s)"); + processorInfo.add(" " + processor.getPhysicalProcessorCount() + " physical CPU core(s)"); + processorInfo.add(" " + processor.getLogicalProcessorCount() + " logical CPU(s)"); - processorInfo.add("Identifier:   " + processor.getProcessorIdentifier().getIdentifier()); - processorInfo.add("ProcessorID:   " + processor.getProcessorIdentifier().getProcessorID()); - processorInfo.add("Context Switches/Interrupts:   " + processor.getContextSwitches() - + " / " + processor.getInterrupts() + "
    "); + processorInfo.add("Identifier:   " + processor.getProcessorIdentifier().getIdentifier()); + processorInfo.add("ProcessorID:   " + processor.getProcessorIdentifier().getProcessorID()); + processorInfo.add("Context Switches/Interrupts:   " + processor.getContextSwitches() + + " / " + processor.getInterrupts() + "
    "); - long[] prevTicks = processor.getSystemCpuLoadTicks(); - long[][] prevProcTicks = processor.getProcessorCpuLoadTicks(); - processorInfo.add("CPU, IOWait, and IRQ ticks @ 0 sec:  " + Arrays.toString(prevTicks)); - // Wait a second... - Util.sleep(1000); - long[] ticks = processor.getSystemCpuLoadTicks(); - processorInfo.add("CPU, IOWait, and IRQ ticks @ 1 sec:  " + Arrays.toString(ticks)); - long user = ticks[CentralProcessor.TickType.USER.getIndex()] - - prevTicks[CentralProcessor.TickType.USER.getIndex()]; - long nice = ticks[CentralProcessor.TickType.NICE.getIndex()] - - prevTicks[CentralProcessor.TickType.NICE.getIndex()]; - long sys = ticks[CentralProcessor.TickType.SYSTEM.getIndex()] - - prevTicks[CentralProcessor.TickType.SYSTEM.getIndex()]; - long idle = ticks[CentralProcessor.TickType.IDLE.getIndex()] - - prevTicks[CentralProcessor.TickType.IDLE.getIndex()]; - long iowait = ticks[CentralProcessor.TickType.IOWAIT.getIndex()] - - prevTicks[CentralProcessor.TickType.IOWAIT.getIndex()]; - long irq = ticks[CentralProcessor.TickType.IRQ.getIndex()] - - prevTicks[CentralProcessor.TickType.IRQ.getIndex()]; - long softirq = ticks[CentralProcessor.TickType.SOFTIRQ.getIndex()] - - prevTicks[CentralProcessor.TickType.SOFTIRQ.getIndex()]; - long steal = ticks[CentralProcessor.TickType.STEAL.getIndex()] - - prevTicks[CentralProcessor.TickType.STEAL.getIndex()]; - long totalCpu = user + nice + sys + idle + iowait + irq + softirq + steal; + long[] prevTicks = processor.getSystemCpuLoadTicks(); + long[][] prevProcTicks = processor.getProcessorCpuLoadTicks(); + processorInfo.add("CPU, IOWait, and IRQ ticks @ 0 sec:  " + Arrays.toString(prevTicks)); + // Wait a second... + Util.sleep(1000); + long[] ticks = processor.getSystemCpuLoadTicks(); + processorInfo.add("CPU, IOWait, and IRQ ticks @ 1 sec:  " + Arrays.toString(ticks)); + long user = ticks[CentralProcessor.TickType.USER.getIndex()] + - prevTicks[CentralProcessor.TickType.USER.getIndex()]; + long nice = ticks[CentralProcessor.TickType.NICE.getIndex()] + - prevTicks[CentralProcessor.TickType.NICE.getIndex()]; + long sys = ticks[CentralProcessor.TickType.SYSTEM.getIndex()] + - prevTicks[CentralProcessor.TickType.SYSTEM.getIndex()]; + long idle = ticks[CentralProcessor.TickType.IDLE.getIndex()] + - prevTicks[CentralProcessor.TickType.IDLE.getIndex()]; + long iowait = ticks[CentralProcessor.TickType.IOWAIT.getIndex()] + - prevTicks[CentralProcessor.TickType.IOWAIT.getIndex()]; + long irq = ticks[CentralProcessor.TickType.IRQ.getIndex()] + - prevTicks[CentralProcessor.TickType.IRQ.getIndex()]; + long softirq = ticks[CentralProcessor.TickType.SOFTIRQ.getIndex()] + - prevTicks[CentralProcessor.TickType.SOFTIRQ.getIndex()]; + long steal = ticks[CentralProcessor.TickType.STEAL.getIndex()] + - prevTicks[CentralProcessor.TickType.STEAL.getIndex()]; + long totalCpu = user + nice + sys + idle + iowait + irq + softirq + steal; - processorInfo.add(String.format( - "User: %.1f%% Nice: %.1f%% System: %.1f%% Idle:" - + " %.1f%% IOwait: %.1f%% IRQ: %.1f%% SoftIRQ: %.1f%% Steal: %.1f%%", - 100d * user / totalCpu, 100d * nice / totalCpu, 100d * sys / totalCpu, 100d * idle / totalCpu, - 100d * iowait / totalCpu, 100d * irq / totalCpu, 100d * softirq / totalCpu, 100d * steal / totalCpu)); - processorInfo.add(String.format("CPU load:   %.1f%%", - processor.getSystemCpuLoadBetweenTicks(prevTicks) * 100)); - double[] loadAverage = processor.getSystemLoadAverage(3); - processorInfo.add("CPU load averages:  " - + (loadAverage[0] < 0 ? 
" N/A" : String.format(" %.2f", loadAverage[0])) - + (loadAverage[1] < 0 ? " N/A" : String.format(" %.2f", loadAverage[1])) - + (loadAverage[2] < 0 ? " N/A" : String.format(" %.2f", loadAverage[2]))); - // per core CPU - StringBuilder procCpu = new StringBuilder("CPU load per processor:  "); - double[] load = processor.getProcessorCpuLoadBetweenTicks(prevProcTicks); - for (double avg : load) { - procCpu.append(String.format(" %.1f%%", avg * 100)); - } - processorInfo.add(procCpu.toString()); - long freq = processor.getProcessorIdentifier().getVendorFreq(); - if (freq > 0) { - processorInfo.add("Vendor Frequency:   " + FormatUtil.formatHertz(freq)); - } - freq = processor.getMaxFreq(); - if (freq > 0) { - processorInfo.add("Max Frequency:   " + FormatUtil.formatHertz(freq)); - } - long[] freqs = processor.getCurrentFreq(); - if (freqs[0] > 0) { - StringBuilder sb = new StringBuilder("Current Frequencies:   "); - for (int i = 0; i < freqs.length; i++) { - if (i > 0) { - sb.append(", "); + processorInfo.add(String.format( + "User: %.1f%% Nice: %.1f%% System: %.1f%% Idle:" + + " %.1f%% IOwait: %.1f%% IRQ: %.1f%% SoftIRQ: %.1f%% Steal: %.1f%%", + 100d * user / totalCpu, 100d * nice / totalCpu, 100d * sys / totalCpu, 100d * idle / totalCpu, + 100d * iowait / totalCpu, 100d * irq / totalCpu, 100d * softirq + / totalCpu, 100d * steal / totalCpu)); + processorInfo.add(String.format("CPU load:   %.1f%%", + processor.getSystemCpuLoadBetweenTicks(prevTicks) * 100)); + double[] loadAverage = processor.getSystemLoadAverage(3); + processorInfo.add("CPU load averages:  " + + (loadAverage[0] < 0 ? " N/A" : String.format(" %.2f", loadAverage[0])) + + (loadAverage[1] < 0 ? " N/A" : String.format(" %.2f", loadAverage[1])) + + (loadAverage[2] < 0 ? " N/A" : String.format(" %.2f", loadAverage[2]))); + // per core CPU + StringBuilder procCpu = new StringBuilder("CPU load per processor:  "); + double[] load = processor.getProcessorCpuLoadBetweenTicks(prevProcTicks); + for (double avg : load) { + procCpu.append(String.format(" %.1f%%", avg * 100)); + } + processorInfo.add(procCpu.toString()); + long freq = processor.getProcessorIdentifier().getVendorFreq(); + if (freq > 0) { + processorInfo.add("Vendor Frequency:   " + FormatUtil.formatHertz(freq)); + } + freq = processor.getMaxFreq(); + if (freq > 0) { + processorInfo.add("Max Frequency:   " + FormatUtil.formatHertz(freq)); + } + long[] freqs = processor.getCurrentFreq(); + if (freqs[0] > 0) { + StringBuilder sb = new StringBuilder("Current Frequencies:   "); + for (int i = 0; i < freqs.length; i++) { + if (i > 0) { + sb.append(", "); + } + sb.append(FormatUtil.formatHertz(freqs[i])); } - sb.append(FormatUtil.formatHertz(freqs[i])); + processorInfo.add(sb.toString()); } - processorInfo.add(sb.toString()); + } catch (Exception e) { + LOG.info("Failed to get processor info", e); } return processorInfo; } private List getMemory(GlobalMemory memory) { List memoryInfo = new ArrayList<>(); - memoryInfo.add("Memory:   " + FormatUtil.formatBytes(memory.getAvailable()) + "/" - + FormatUtil.formatBytes(memory.getTotal())); - VirtualMemory vm = memory.getVirtualMemory(); - memoryInfo.add("Swap used:   " + FormatUtil.formatBytes(vm.getSwapUsed()) + "/" - + FormatUtil.formatBytes(vm.getSwapTotal())); + try { + memoryInfo.add("Memory:   " + FormatUtil.formatBytes(memory.getAvailable()) + "/" + + FormatUtil.formatBytes(memory.getTotal())); + VirtualMemory vm = memory.getVirtualMemory(); + memoryInfo.add("Swap used:   " + FormatUtil.formatBytes(vm.getSwapUsed()) + "/" + + 
FormatUtil.formatBytes(vm.getSwapTotal())); + } catch (Exception e) { + LOG.info("Failed to get memory info", e); + } return memoryInfo; } private List getProcesses(OperatingSystem os, GlobalMemory memory) { List processInfo = new ArrayList<>(); - processInfo.add("Processes:   " + os.getProcessCount() - + ", Threads:   " + os.getThreadCount()); - // Sort by highest CPU + try { + processInfo.add("Processes:   " + os.getProcessCount() + + ", Threads:   " + os.getThreadCount()); + // Sort by highest CPU - List procs = os.getProcesses((osProcess) -> true, ProcessSorting.CPU_DESC, 5); + List procs = os.getProcesses((osProcess) -> true, ProcessSorting.CPU_DESC, 5); - processInfo.add("         PID %CPU %MEM VSZ RSS Name"); - for (int i = 0; i < procs.size() && i < 5; i++) { - OSProcess p = procs.get(i); - processInfo.add(String.format("         %5d %5.1f %4.1f %9s %9s %s", - p.getProcessID(), - 100d * (p.getKernelTime() + p.getUserTime()) / p.getUpTime(), - 100d * p.getResidentSetSize() / memory.getTotal(), FormatUtil.formatBytes(p.getVirtualSize()), - FormatUtil.formatBytes(p.getResidentSetSize()), p.getName())); + processInfo.add("         PID %CPU %MEM VSZ RSS Name"); + for (int i = 0; i < procs.size() && i < 5; i++) { + OSProcess p = procs.get(i); + processInfo.add(String.format("         " + + "%5d %5.1f %4.1f %9s %9s %s", + p.getProcessID(), + 100d * (p.getKernelTime() + p.getUserTime()) / p.getUpTime(), + 100d * p.getResidentSetSize() / memory.getTotal(), FormatUtil.formatBytes(p.getVirtualSize()), + FormatUtil.formatBytes(p.getResidentSetSize()), p.getName())); + } + } catch (Exception e) { + LOG.info("Failed to get process info", e); } return processInfo; } @@ -229,69 +260,85 @@ private List getDisks(List diskStores) { private List getFileSystem(FileSystem fileSystem) { List fsInfo = new ArrayList<>(); - fsInfo.add("File System:  "); + try { + fsInfo.add("File System:  "); - fsInfo.add(String.format("    File Descriptors: %d/%d", fileSystem.getOpenFileDescriptors(), - fileSystem.getMaxFileDescriptors())); + fsInfo.add(String.format("    File Descriptors: %d/%d", + fileSystem.getOpenFileDescriptors(), + fileSystem.getMaxFileDescriptors())); - List fsList = fileSystem.getFileStores(); - for (OSFileStore fs : fsList) { - long usable = fs.getUsableSpace(); - long total = fs.getTotalSpace(); - fsInfo.add(String.format("        " - + "%s (%s) [%s] %s of %s free (%.1f%%), %s of %s files free (%.1f%%) is %s " - + (fs.getLogicalVolume() != null && !fs.getLogicalVolume().isEmpty() ? "[%s]" : "%s") - + " and is mounted at %s", - fs.getName(), fs.getDescription().isEmpty() ? "file system" : fs.getDescription(), fs.getType(), - FormatUtil.formatBytes(usable), FormatUtil.formatBytes(fs.getTotalSpace()), 100d * usable / total, - FormatUtil.formatValue(fs.getFreeInodes(), ""), FormatUtil.formatValue(fs.getTotalInodes(), ""), - 100d * fs.getFreeInodes() / fs.getTotalInodes(), fs.getVolume(), fs.getLogicalVolume(), - fs.getMount())); + List fsList = fileSystem.getFileStores(); + for (OSFileStore fs : fsList) { + long usable = fs.getUsableSpace(); + long total = fs.getTotalSpace(); + fsInfo.add(String.format("        " + + "%s (%s) [%s] %s of %s free (%.1f%%), %s of %s files free (%.1f%%) is %s " + + (fs.getLogicalVolume() != null && !fs.getLogicalVolume().isEmpty() ? "[%s]" : "%s") + + " and is mounted at %s", + fs.getName(), fs.getDescription().isEmpty() ? 
"file system" : fs.getDescription(), fs.getType(), + FormatUtil.formatBytes(usable), FormatUtil.formatBytes(fs.getTotalSpace()), + 100d * usable / total, + FormatUtil.formatValue(fs.getFreeInodes(), ""), + FormatUtil.formatValue(fs.getTotalInodes(), ""), + 100d * fs.getFreeInodes() / fs.getTotalInodes(), fs.getVolume(), fs.getLogicalVolume(), + fs.getMount())); + } + } catch (Exception e) { + LOG.info("Failed to get file system info", e); } + return fsInfo; } private List getNetworkInterfaces(List networkIFs) { List getNetwork = new ArrayList<>(); - getNetwork.add("Network interfaces:  "); - for (NetworkIF net : networkIFs) { - getNetwork.add(String.format("    Name: %s (%s)", - net.getName(), net.getDisplayName())); - getNetwork.add(String.format("        MAC Address: %s", - net.getMacaddr())); - getNetwork.add(String.format("        MTU: %s, Speed: %s", - net.getMTU(), FormatUtil.formatValue(net.getSpeed(), "bps"))); - getNetwork.add(String.format("        IPv4: %s", - Arrays.toString(net.getIPv4addr()))); - getNetwork.add(String.format("        IPv6: %s", - Arrays.toString(net.getIPv6addr()))); - boolean hasData = net.getBytesRecv() > 0 || net.getBytesSent() > 0 || net.getPacketsRecv() > 0 - || net.getPacketsSent() > 0; - getNetwork.add(String.format("        Traffic:" - + " received %s/%s%s; transmitted %s/%s%s", - hasData ? net.getPacketsRecv() + " packets" : "?", - hasData ? FormatUtil.formatBytes(net.getBytesRecv()) : "?", - hasData ? " (" + net.getInErrors() + " err)" : "", - hasData ? net.getPacketsSent() + " packets" : "?", - hasData ? FormatUtil.formatBytes(net.getBytesSent()) : "?", - hasData ? " (" + net.getOutErrors() + " err)" : "")); + try { + getNetwork.add("Network interfaces:  "); + for (NetworkIF net : networkIFs) { + getNetwork.add(String.format("    Name: %s (%s)", + net.getName(), net.getDisplayName())); + getNetwork.add(String.format("        MAC Address: %s", + net.getMacaddr())); + getNetwork.add(String.format("        MTU: %s, Speed: %s", + net.getMTU(), FormatUtil.formatValue(net.getSpeed(), "bps"))); + getNetwork.add(String.format("        IPv4: %s", + Arrays.toString(net.getIPv4addr()))); + getNetwork.add(String.format("        IPv6: %s", + Arrays.toString(net.getIPv6addr()))); + boolean hasData = net.getBytesRecv() > 0 || net.getBytesSent() > 0 || net.getPacketsRecv() > 0 + || net.getPacketsSent() > 0; + getNetwork.add(String.format("        Traffic:" + + " received %s/%s%s; transmitted %s/%s%s", + hasData ? net.getPacketsRecv() + " packets" : "?", + hasData ? FormatUtil.formatBytes(net.getBytesRecv()) : "?", + hasData ? " (" + net.getInErrors() + " err)" : "", + hasData ? net.getPacketsSent() + " packets" : "?", + hasData ? FormatUtil.formatBytes(net.getBytesSent()) : "?", + hasData ? 
" (" + net.getOutErrors() + " err)" : "")); + } + } catch (Exception e) { + LOG.info("Failed to get network info", e); } return getNetwork; } private List getNetworkParameters(NetworkParams networkParams) { List networkParameterInfo = new ArrayList<>(); - networkParameterInfo.add("Network parameters:    "); - networkParameterInfo.add(String.format("        Host name: %s", - networkParams.getHostName())); - networkParameterInfo.add(String.format("         Domain name: %s", - networkParams.getDomainName())); - networkParameterInfo.add(String.format("         DNS servers: %s", - Arrays.toString(networkParams.getDnsServers()))); - networkParameterInfo.add(String.format("         IPv4 Gateway: %s", - networkParams.getIpv4DefaultGateway())); - networkParameterInfo.add(String.format("         IPv6 Gateway: %s", - networkParams.getIpv6DefaultGateway())); + try { + networkParameterInfo.add("Network parameters:    "); + networkParameterInfo.add(String.format("        Host name: %s", + networkParams.getHostName())); + networkParameterInfo.add(String.format("         Domain name: %s", + networkParams.getDomainName())); + networkParameterInfo.add(String.format("         DNS servers: %s", + Arrays.toString(networkParams.getDnsServers()))); + networkParameterInfo.add(String.format("         IPv4 Gateway: %s", + networkParams.getIpv4DefaultGateway())); + networkParameterInfo.add(String.format("         IPv6 Gateway: %s", + networkParams.getIpv6DefaultGateway())); + } catch (Exception e) { + LOG.info("Failed to get network parameters info", e); + } return networkParameterInfo; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/MetaInfoAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/MetaInfoAction.java index 1218736a2cbed9..14368c2869b849 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/MetaInfoAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/MetaInfoAction.java @@ -59,6 +59,7 @@ * And meta info like databases, tables and schema */ @RestController +@Deprecated public class MetaInfoAction extends RestBaseController { private static final String NAMESPACES = "namespaces"; diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java index 8d93a440b22183..09ca16e6ad0c54 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/ShowAction.java @@ -30,6 +30,7 @@ import org.apache.doris.common.proc.ProcNodeInterface; import org.apache.doris.common.proc.ProcResult; import org.apache.doris.common.proc.ProcService; +import org.apache.doris.datasource.InternalCatalog; import org.apache.doris.ha.HAProtocol; import org.apache.doris.httpv2.entity.ResponseEntityBuilder; import org.apache.doris.mysql.privilege.PrivPredicate; @@ -214,16 +215,23 @@ public Object show_data(HttpServletRequest request, HttpServletResponse response public Object show_table_data(HttpServletRequest request, HttpServletResponse response) { if (Config.enable_all_http_auth) { executeCheckPassword(request, response); - checkGlobalAuth(ConnectContext.get().getCurrentUserIdentity(), PrivPredicate.ADMIN); } - String dbName = request.getParameter(DB_KEY); String tableName = request.getParameter(TABLE_KEY); + + if (StringUtils.isEmpty(dbName) && StringUtils.isEmpty(tableName)) { + return ResponseEntityBuilder.okWithCommonError("db and table cannot be empty at the same time"); + } + String singleReplica 
= request.getParameter(SINGLE_REPLICA_KEY); boolean singleReplicaBool = Boolean.parseBoolean(singleReplica); Map> oneEntry = Maps.newHashMap(); if (dbName != null) { String fullDbName = getFullDbName(dbName); + if (!StringUtils.isEmpty(tableName) && Config.enable_all_http_auth) { + checkTblAuth(ConnectContext.get().getCurrentUserIdentity(), fullDbName, tableName, PrivPredicate.SHOW); + } + DatabaseIf db = Env.getCurrentInternalCatalog().getDbNullable(fullDbName); if (db == null) { return ResponseEntityBuilder.okWithCommonError("database " + fullDbName + " not found."); @@ -236,6 +244,12 @@ public Object show_table_data(HttpServletRequest request, HttpServletResponse re if (db == null || !(db instanceof Database) || ((Database) db) instanceof MysqlCompatibleDatabase) { continue; } + if (Config.enable_all_http_auth && !Env.getCurrentEnv().getAccessManager() + .checkTblPriv(ConnectContext.get().getCurrentUserIdentity(), + InternalCatalog.INTERNAL_CATALOG_NAME, db.getFullName(), tableName, + PrivPredicate.SHOW)) { + continue; + } Map tablesEntry = getDataSizeOfTables(db, tableName, singleReplicaBool); oneEntry.put(ClusterNamespace.getNameFromFullName(db.getFullName()), tablesEntry); } @@ -331,6 +345,12 @@ private Map getDataSizeOfTables(DatabaseIf db, String tableName, b if (Strings.isNullOrEmpty(tableName)) { List
  • tables = db.getTables(); for (Table table : tables) { + if (Config.enable_all_http_auth && !Env.getCurrentEnv().getAccessManager() + .checkTblPriv(ConnectContext.get(), InternalCatalog.INTERNAL_CATALOG_NAME, db.getFullName(), + table.getName(), + PrivPredicate.SHOW)) { + continue; + } Map tableEntry = getDataSizeOfTable(table, singleReplica); oneEntry.putAll(tableEntry); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java index a5c915e0bbc48f..95934d31dee5c9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java @@ -42,7 +42,7 @@ * Used to return the cluster information for the manager. */ @RestController -@RequestMapping("/rest/v2/manager/cluster") +@RequestMapping(path = {"/rest/v2/manager/cluster", "/rest/v2/manager/compute_group"}) public class ClusterAction extends RestBaseController { // Returns mysql and http connection information for the cluster. @@ -54,7 +54,7 @@ public class ClusterAction extends RestBaseController { // "" // ] // } - @RequestMapping(path = "/cluster_info/conn_info", method = RequestMethod.GET) + @RequestMapping(path = {"/cluster_info/conn_info", "/compute_group_info/conn_info"}, method = RequestMethod.GET) public Object clusterInfo(HttpServletRequest request, HttpServletResponse response) { executeCheckPassword(request, response); checkGlobalAuth(ConnectContext.get().getCurrentUserIdentity(), PrivPredicate.ADMIN); @@ -85,7 +85,8 @@ public static class BeClusterInfo { public volatile long lastFragmentUpdateTime = 0; } - @RequestMapping(path = "/cluster_info/cloud_cluster_status", method = RequestMethod.GET) + @RequestMapping(path = {"/cluster_info/cloud_cluster_status", "/compute_group_info/compute_group_status"}, + method = RequestMethod.GET) public Object cloudClusterInfo(HttpServletRequest request, HttpServletResponse response) { executeCheckPassword(request, response); checkGlobalAuth(ConnectContext.get().getCurrentUserIdentity(), PrivPredicate.ADMIN); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/QueryProfileAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/QueryProfileAction.java index 0623932bc9dad2..e5f19f90a1a9fe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/QueryProfileAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/QueryProfileAction.java @@ -22,6 +22,7 @@ import org.apache.doris.common.AuthenticationException; import org.apache.doris.common.Config; import org.apache.doris.common.Pair; +import org.apache.doris.common.Status; import org.apache.doris.common.proc.CurrentQueryStatementsProcNode; import org.apache.doris.common.proc.ProcResult; import org.apache.doris.common.profile.ProfileTreeNode; @@ -38,6 +39,7 @@ import org.apache.doris.qe.ConnectContext; import org.apache.doris.service.ExecuteEnv; import org.apache.doris.service.FrontendOptions; +import org.apache.doris.thrift.TStatusCode; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; @@ -576,7 +578,7 @@ public Object killQuery(HttpServletRequest request, HttpServletResponse response } ExecuteEnv env = ExecuteEnv.getInstance(); - env.getScheduler().cancelQuery(queryId); + env.getScheduler().cancelQuery(queryId, new Status(TStatusCode.CANCELLED, "cancel query by rest api")); return 
ResponseEntityBuilder.ok(); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java index 13a247ba6338e9..436d2611584502 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java @@ -86,13 +86,14 @@ public Object getAllDatabases( HttpServletRequest request, HttpServletResponse response) { checkWithCookie(request, response, false); - if (!ns.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) { - return ResponseEntityBuilder.badRequest("Only support 'default_cluster' now"); + if (!ns.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER) && !ns.equalsIgnoreCase( + InternalCatalog.INTERNAL_CATALOG_NAME)) { + return ResponseEntityBuilder.badRequest("Only support 'default_cluster/internal' now"); } // 1. get all database with privilege List dbNames = Env.getCurrentInternalCatalog().getDbNames(); - List dbNameSet = Lists.newArrayList(); + List filteredDbNames = Lists.newArrayList(); for (String fullName : dbNames) { final String db = ClusterNamespace.getNameFromFullName(fullName); if (!Env.getCurrentEnv().getAccessManager() @@ -100,14 +101,14 @@ public Object getAllDatabases( PrivPredicate.SHOW)) { continue; } - dbNameSet.add(db); + filteredDbNames.add(db); } - Collections.sort(dbNames); + Collections.sort(filteredDbNames); // handle limit offset - Pair fromToIndex = getFromToIndex(request, dbNames.size()); - return ResponseEntityBuilder.ok(dbNames.subList(fromToIndex.first, fromToIndex.second)); + Pair fromToIndex = getFromToIndex(request, filteredDbNames.size()); + return ResponseEntityBuilder.ok(filteredDbNames.subList(fromToIndex.first, fromToIndex.second)); } /** Get all tables of a database @@ -129,8 +130,9 @@ public Object getTables( HttpServletRequest request, HttpServletResponse response) { checkWithCookie(request, response, false); - if (!ns.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) { - return ResponseEntityBuilder.badRequest("Only support 'default_cluster' now"); + if (!ns.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER) && !ns.equalsIgnoreCase( + InternalCatalog.INTERNAL_CATALOG_NAME)) { + return ResponseEntityBuilder.badRequest("Only support 'default_cluster/internal' now"); } String fullDbName = getFullDbName(dbName); @@ -199,8 +201,9 @@ public Object getTableSchema( HttpServletRequest request, HttpServletResponse response) throws UserException { checkWithCookie(request, response, false); - if (!ns.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) { - return ResponseEntityBuilder.badRequest("Only support 'default_cluster' now"); + if (!ns.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER) && !ns.equalsIgnoreCase( + InternalCatalog.INTERNAL_CATALOG_NAME)) { + return ResponseEntityBuilder.badRequest("Only support 'default_cluster/internal' now"); } String fullDbName = getFullDbName(dbName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteManager.java b/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteManager.java index 81524ae020810e..a00107c76a74a0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteManager.java @@ -17,17 +17,21 @@ package org.apache.doris.insertoverwrite; +import org.apache.doris.catalog.DatabaseIf; import 
org.apache.doris.catalog.Env; import org.apache.doris.catalog.OlapTable; +import org.apache.doris.catalog.TableIf; import org.apache.doris.common.DdlException; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.common.util.MasterDaemon; import org.apache.doris.insertoverwrite.InsertOverwriteLog.InsertOverwriteOpType; +import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.persist.gson.GsonUtils; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; +import com.google.common.collect.Sets; import com.google.gson.annotations.SerializedName; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -40,7 +44,9 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; public class InsertOverwriteManager extends MasterDaemon implements Writable { private static final Logger LOG = LogManager.getLogger(InsertOverwriteManager.class); @@ -62,6 +68,11 @@ public class InsertOverwriteManager extends MasterDaemon implements Writable { @SerializedName(value = "partitionPairs") private Map> partitionPairs = Maps.newConcurrentMap(); + // TableId running insert overwrite + // dbId ==> Set + private Map> runningTables = Maps.newHashMap(); + private ReentrantReadWriteLock runningLock = new ReentrantReadWriteLock(true); + public InsertOverwriteManager() { super("InsertOverwriteDropDirtyPartitions", CLEAN_INTERVAL_SECOND * 1000); } @@ -270,6 +281,53 @@ private boolean rollback(long taskId) { return InsertOverwriteUtil.dropPartitions(olapTable, task.getTempPartitionNames()); } + /** + * If the current table id has a running insert overwrite, throw an exception. 
+ * If not, record it in runningTables + * + * @param db Run the db for insert overwrite + * @param table Run the table for insert overwrite + */ + public void recordRunningTableOrException(DatabaseIf db, TableIf table) { + long dbId = db.getId(); + long tableId = table.getId(); + runningLock.writeLock().lock(); + try { + if (runningTables.containsKey(dbId) && runningTables.get(dbId).contains(tableId)) { + throw new AnalysisException( + String.format("Not allowed running Insert Overwrite on same table: %s.%s", db.getFullName(), + table.getName())); + } + if (runningTables.containsKey(dbId)) { + runningTables.get(dbId).add(tableId); + } else { + runningTables.put(dbId, Sets.newHashSet(tableId)); + } + } finally { + runningLock.writeLock().unlock(); + } + } + + /** + * Remove from running records + * + * @param dbId Run the dbId for insert overwrite + * @param tableId Run the tableId for insert overwrite + */ + public void dropRunningRecord(long dbId, long tableId) { + runningLock.writeLock().lock(); + try { + if (runningTables.containsKey(dbId) && runningTables.get(dbId).contains(tableId)) { + runningTables.get(dbId).remove(tableId); + if (runningTables.get(dbId).size() == 0) { + runningTables.remove(dbId); + } + } + } finally { + runningLock.writeLock().unlock(); + } + } + /** * replay logs * diff --git a/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteUtil.java b/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteUtil.java index 90cb8cf1fd2cb4..c2842569ca53d0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/insertoverwrite/InsertOverwriteUtil.java @@ -78,7 +78,7 @@ public static void replacePartition(TableIf olapTable, List partitionNam properties.put(PropertyAnalyzer.PROPERTIES_USE_TEMP_PARTITION_NAME, "false"); ReplacePartitionClause replacePartitionClause = new ReplacePartitionClause( new PartitionNames(false, partitionNames), - new PartitionNames(true, tempPartitionNames), true, properties); + new PartitionNames(true, tempPartitionNames), false, properties); if (replacePartitionClause.getTempPartitionNames().isEmpty()) { return; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/InsertTask.java b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/InsertTask.java index 1c41c243f7d7d7..d1a425aeaf7838 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/InsertTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/insert/InsertTask.java @@ -21,6 +21,7 @@ import org.apache.doris.catalog.Column; import org.apache.doris.catalog.Env; import org.apache.doris.catalog.ScalarType; +import org.apache.doris.common.Status; import org.apache.doris.common.util.TimeUtils; import org.apache.doris.job.exception.JobException; import org.apache.doris.job.task.AbstractTask; @@ -33,6 +34,7 @@ import org.apache.doris.qe.StmtExecutor; import org.apache.doris.thrift.TCell; import org.apache.doris.thrift.TRow; +import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TUniqueId; import com.google.common.collect.ImmutableList; @@ -219,7 +221,7 @@ protected void executeCancelLogic() { } isCanceled.getAndSet(true); if (null != stmtExecutor) { - stmtExecutor.cancel(); + stmtExecutor.cancel(new Status(TStatusCode.CANCELLED, "insert task cancelled")); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVJob.java 
b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVJob.java index 5d7cf4435b9c7a..add191001f9125 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVJob.java @@ -202,7 +202,7 @@ public List queryTasks() { LOG.warn("get mtmv failed", e); return Lists.newArrayList(); } - return Lists.newArrayList(mtmv.getJobInfo().getHistoryTasks()); + return Lists.newArrayList(mtmv.getHistoryTasks()); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java index 0fe60a94e56b57..a2ec9fb03b00dd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/job/extensions/mtmv/MTMVTask.java @@ -25,6 +25,7 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.DdlException; import org.apache.doris.common.FeConstants; +import org.apache.doris.common.Status; import org.apache.doris.common.UserException; import org.apache.doris.common.util.DebugUtil; import org.apache.doris.common.util.TimeUtils; @@ -50,6 +51,7 @@ import org.apache.doris.qe.StmtExecutor; import org.apache.doris.thrift.TCell; import org.apache.doris.thrift.TRow; +import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TUniqueId; import com.google.common.collect.ImmutableList; @@ -229,6 +231,10 @@ private void exec(ConnectContext ctx, Set refreshPartitionNames, ctx.setQueryId(queryId); ctx.getState().setNereids(true); command.run(ctx, executor); + if (getStatus() == TaskStatus.CANCELED) { + // Throwing an exception to interrupt subsequent partition update tasks + throw new JobException("task is CANCELED"); + } if (ctx.getState().getStateType() != MysqlStateType.OK) { throw new JobException(ctx.getState().getErrorMessage()); } @@ -254,7 +260,7 @@ public synchronized void onSuccess() throws JobException { protected synchronized void executeCancelLogic() { LOG.info("mtmv task cancel, taskId: {}", super.getTaskId()); if (executor != null) { - executor.cancel(); + executor.cancel(new Status(TStatusCode.CANCELLED, "mtmv task cancelled")); } after(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/ExportTaskExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/load/ExportTaskExecutor.java index 56969357066846..1cfdc0c174c645 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/ExportTaskExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/ExportTaskExecutor.java @@ -18,7 +18,6 @@ package org.apache.doris.load; import org.apache.doris.analysis.OutFileClause; -import org.apache.doris.analysis.SelectStmt; import org.apache.doris.analysis.StatementBase; import org.apache.doris.catalog.Database; import org.apache.doris.catalog.Env; @@ -26,6 +25,7 @@ import org.apache.doris.catalog.Partition; import org.apache.doris.catalog.TabletMeta; import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.Status; import org.apache.doris.load.ExportFailMsg.CancelType; import org.apache.doris.nereids.analyzer.UnboundRelation; import org.apache.doris.nereids.glue.LogicalPlanAdapter; @@ -36,6 +36,7 @@ import org.apache.doris.qe.StmtExecutor; import org.apache.doris.scheduler.exception.JobException; import org.apache.doris.scheduler.executor.TransientTaskExecutor; +import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TUniqueId; 
import com.google.common.collect.Lists; @@ -95,15 +96,10 @@ public void execute() throws JobException { table.readLock(); try { List tabletIds; - if (exportJob.getSessionVariables().isEnableNereidsPlanner()) { - LogicalPlanAdapter logicalPlanAdapter = (LogicalPlanAdapter) selectStmtLists.get(idx); - Optional unboundRelation = findUnboundRelation( - logicalPlanAdapter.getLogicalPlan()); - tabletIds = unboundRelation.get().getTabletIds(); - } else { - SelectStmt selectStmt = (SelectStmt) selectStmtLists.get(idx); - tabletIds = selectStmt.getTableRefs().get(0).getSampleTabletIds(); - } + LogicalPlanAdapter logicalPlanAdapter = (LogicalPlanAdapter) selectStmtLists.get(idx); + Optional unboundRelation = findUnboundRelation( + logicalPlanAdapter.getLogicalPlan()); + tabletIds = unboundRelation.get().getTabletIds(); for (Long tabletId : tabletIds) { TabletMeta tabletMeta = Env.getCurrentEnv().getTabletInvertedIndex().getTabletMeta( @@ -162,7 +158,7 @@ public void cancel() throws JobException { } isCanceled.getAndSet(true); if (stmtExecutor != null) { - stmtExecutor.cancel(); + stmtExecutor.cancel(new Status(TStatusCode.CANCELLED, "export task cancelled")); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java index fb5f06fced570b..5e1b085b239474 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/BrokerLoadJob.java @@ -29,6 +29,7 @@ import org.apache.doris.common.DataQualityException; import org.apache.doris.common.DdlException; import org.apache.doris.common.DuplicatedRequestException; +import org.apache.doris.common.InternalErrorCode; import org.apache.doris.common.LabelAlreadyUsedException; import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.common.QuotaExceedException; @@ -335,42 +336,59 @@ private void onLoadingTaskFinished(BrokerLoadingTaskAttachment attachment) { } Database db = null; List
    tableList = null; - try { - db = getDb(); - tableList = db.getTablesOnIdOrderOrThrowException(Lists.newArrayList(fileGroupAggInfo.getAllTableIds())); - if (Config.isCloudMode()) { - MetaLockUtils.commitLockTables(tableList); - } else { - MetaLockUtils.writeLockTablesOrMetaException(tableList); + int retryTimes = 0; + while (true) { + try { + db = getDb(); + tableList = db.getTablesOnIdOrderOrThrowException( + Lists.newArrayList(fileGroupAggInfo.getAllTableIds())); + if (Config.isCloudMode()) { + MetaLockUtils.commitLockTables(tableList); + } else { + MetaLockUtils.writeLockTablesOrMetaException(tableList); + } + } catch (MetaNotFoundException e) { + LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id) + .add("database_id", dbId) + .add("error_msg", "db has been deleted when job is loading") + .build(), e); + cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.LOAD_RUN_FAIL, e.getMessage()), true, true); + return; } - } catch (MetaNotFoundException e) { - LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id) - .add("database_id", dbId) - .add("error_msg", "db has been deleted when job is loading") - .build(), e); - cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.LOAD_RUN_FAIL, e.getMessage()), true, true); - return; - } - try { - LOG.info(new LogBuilder(LogKey.LOAD_JOB, id) - .add("txn_id", transactionId) - .add("msg", "Load job try to commit txn") - .build()); - Env.getCurrentGlobalTransactionMgr().commitTransaction( - dbId, tableList, transactionId, commitInfos, getLoadJobFinalOperation()); - afterLoadingTaskCommitTransaction(tableList); - afterCommit(); - } catch (UserException e) { - LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id) - .add("database_id", dbId) - .add("error_msg", "Failed to commit txn with error:" + e.getMessage()) - .build(), e); - cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.LOAD_RUN_FAIL, e.getMessage()), true, true); - } finally { - if (Config.isCloudMode()) { - MetaLockUtils.commitUnlockTables(tableList); - } else { - MetaLockUtils.writeUnlockTables(tableList); + try { + LOG.info(new LogBuilder(LogKey.LOAD_JOB, id) + .add("txn_id", transactionId) + .add("msg", "Load job try to commit txn") + .build()); + Env.getCurrentGlobalTransactionMgr().commitTransaction( + dbId, tableList, transactionId, commitInfos, getLoadJobFinalOperation()); + afterLoadingTaskCommitTransaction(tableList); + afterCommit(); + return; + } catch (UserException e) { + LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id) + .add("database_id", dbId) + .add("retry_times", retryTimes) + .add("error_msg", "Failed to commit txn with error:" + e.getMessage()) + .build(), e); + if (e.getErrorCode() == InternalErrorCode.DELETE_BITMAP_LOCK_ERR) { + retryTimes++; + if (retryTimes >= Config.mow_calculate_delete_bitmap_retry_times) { + LOG.warn("cancelJob {} because up to max retry time,exception {}", id, e); + cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.LOAD_RUN_FAIL, e.getMessage()), true, + true); + return; + } + } else { + cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.LOAD_RUN_FAIL, e.getMessage()), true, true); + return; + } + } finally { + if (Config.isCloudMode()) { + MetaLockUtils.commitUnlockTables(tableList); + } else { + MetaLockUtils.writeUnlockTables(tableList); + } } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java index 3268fb3c464836..f450a1dca7dff6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/LoadJob.java @@ -36,6 +36,7 @@ import org.apache.doris.common.LoadException; import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.common.QuotaExceedException; +import org.apache.doris.common.Status; import org.apache.doris.common.UserException; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; @@ -57,6 +58,7 @@ import org.apache.doris.qe.QeProcessorImpl; import org.apache.doris.thrift.TEtlState; import org.apache.doris.thrift.TPipelineWorkloadGroup; +import org.apache.doris.thrift.TStatusCode; import org.apache.doris.thrift.TUniqueId; import org.apache.doris.transaction.AbstractTxnStateChangeCallback; import org.apache.doris.transaction.BeginTransactionException; @@ -607,7 +609,7 @@ protected void unprotectedExecuteRetry(FailMsg failMsg) { for (TUniqueId loadId : loadIds) { Coordinator coordinator = QeProcessorImpl.INSTANCE.getCoordinator(loadId); if (coordinator != null) { - coordinator.cancel(); + coordinator.cancel(new Status(TStatusCode.CANCELLED, failMsg.getMsg())); } } @@ -671,7 +673,7 @@ protected void unprotectedExecuteCancel(FailMsg failMsg, boolean abortTxn) { for (TUniqueId loadId : loadIds) { Coordinator coordinator = QeProcessorImpl.INSTANCE.getCoordinator(loadId); if (coordinator != null) { - coordinator.cancel(); + coordinator.cancel(new Status(TStatusCode.CANCELLED, failMsg.getMsg())); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java index 1f1c71d7a903d2..f01f205e96dc0d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/loadv2/SparkLoadJob.java @@ -45,9 +45,11 @@ import org.apache.doris.catalog.Tablet; import org.apache.doris.catalog.Type; import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.Config; import org.apache.doris.common.DataQualityException; import org.apache.doris.common.DdlException; import org.apache.doris.common.DuplicatedRequestException; +import org.apache.doris.common.InternalErrorCode; import org.apache.doris.common.LabelAlreadyUsedException; import org.apache.doris.common.LoadException; import org.apache.doris.common.MetaNotFoundException; @@ -656,21 +658,50 @@ public void updateLoadingStatus() throws UserException { } private void tryCommitJob() throws UserException { - LOG.info(new LogBuilder(LogKey.LOAD_JOB, id).add("txn_id", transactionId) - .add("msg", "Load job try to commit txn").build()); - Database db = getDb(); - List
    tableList = db.getTablesOnIdOrderOrThrowException( - Lists.newArrayList(tableToLoadPartitions.keySet())); - MetaLockUtils.writeLockTablesOrMetaException(tableList); - try { - Env.getCurrentGlobalTransactionMgr().commitTransaction( - dbId, tableList, transactionId, commitInfos, - new LoadJobFinalOperation(id, loadingStatus, progress, loadStartTimestamp, - finishTimestamp, state, failMsg)); - } catch (TabletQuorumFailedException e) { - // retry in next loop - } finally { - MetaLockUtils.writeUnlockTables(tableList); + int retryTimes = 0; + while (true) { + Database db = getDb(); + List
    tableList = db.getTablesOnIdOrderOrThrowException( + Lists.newArrayList(tableToLoadPartitions.keySet())); + if (Config.isCloudMode()) { + MetaLockUtils.commitLockTables(tableList); + } else { + MetaLockUtils.writeLockTablesOrMetaException(tableList); + } + try { + LOG.info(new LogBuilder(LogKey.LOAD_JOB, id).add("txn_id", transactionId) + .add("msg", "Load job try to commit txn").build()); + Env.getCurrentGlobalTransactionMgr().commitTransaction( + dbId, tableList, transactionId, commitInfos, + new LoadJobFinalOperation(id, loadingStatus, progress, loadStartTimestamp, + finishTimestamp, state, failMsg)); + return; + } catch (TabletQuorumFailedException e) { + // retry in next loop + return; + } catch (UserException e) { + LOG.warn(new LogBuilder(LogKey.LOAD_JOB, id) + .add("txn_id", transactionId) + .add("database_id", dbId) + .add("retry_times", retryTimes) + .add("error_msg", "Failed to commit txn with error:" + e.getMessage()) + .build(), e); + if (e.getErrorCode() == InternalErrorCode.DELETE_BITMAP_LOCK_ERR) { + retryTimes++; + if (retryTimes >= Config.mow_calculate_delete_bitmap_retry_times) { + LOG.warn("cancelJob {} because up to max retry time, exception {}", id, e); + throw e; + } + } else { + throw e; + } + } finally { + if (Config.isCloudMode()) { + MetaLockUtils.commitUnlockTables(tableList); + } else { + MetaLockUtils.writeUnlockTables(tableList); + } + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java index 1762f8d11223d0..6bdef3301a610e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaRoutineLoadJob.java @@ -234,7 +234,8 @@ public void divideRoutineLoadJob(int currentConcurrentTaskNum) throws UserExcept ((KafkaProgress) progress).getOffsetByPartition(kafkaPartition)); } KafkaTaskInfo kafkaTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), id, - maxBatchIntervalS * 2 * 1000, 0, taskKafkaProgress, isMultiTable()); + maxBatchIntervalS * Config.routine_load_task_timeout_multiplier * 1000, + taskKafkaProgress, isMultiTable()); routineLoadTaskInfoList.add(kafkaTaskInfo); result.add(kafkaTaskInfo); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java index 7aa9ebda09fda0..f1578269529a12 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/KafkaTaskInfo.java @@ -49,16 +49,14 @@ public class KafkaTaskInfo extends RoutineLoadTaskInfo { private Map partitionIdToOffset; public KafkaTaskInfo(UUID id, long jobId, - long timeoutMs, int timeoutBackOffCount, - Map partitionIdToOffset, boolean isMultiTable) { - super(id, jobId, timeoutMs, timeoutBackOffCount, isMultiTable); + long timeoutMs, Map partitionIdToOffset, boolean isMultiTable) { + super(id, jobId, timeoutMs, isMultiTable); this.partitionIdToOffset = partitionIdToOffset; } public KafkaTaskInfo(KafkaTaskInfo kafkaTaskInfo, Map partitionIdToOffset, boolean isMultiTable) { super(UUID.randomUUID(), kafkaTaskInfo.getJobId(), - kafkaTaskInfo.getTimeoutMs(), kafkaTaskInfo.getTimeoutBackOffCount(), - kafkaTaskInfo.getBeId(), isMultiTable); + kafkaTaskInfo.getTimeoutMs(), kafkaTaskInfo.getBeId(), isMultiTable); this.partitionIdToOffset = partitionIdToOffset; this.isEof 
= kafkaTaskInfo.getIsEof(); } @@ -137,11 +135,6 @@ private TPipelineFragmentParams rePlan(RoutineLoadJob routineLoadJob) throws Use TPipelineFragmentParams tExecPlanFragmentParams = routineLoadJob.plan(planner, loadId, txnId); TPlanFragment tPlanFragment = tExecPlanFragmentParams.getFragment(); tPlanFragment.getOutputSink().getOlapTableSink().setTxnId(txnId); - // it needs update timeout to make task timeout backoff work - long timeoutS = this.getTimeoutMs() / 1000; - tPlanFragment.getOutputSink().getOlapTableSink().setLoadChannelTimeoutS(timeoutS); - tExecPlanFragmentParams.getQueryOptions().setQueryTimeout((int) timeoutS); - tExecPlanFragmentParams.getQueryOptions().setExecutionTimeout((int) timeoutS); if (Config.enable_workload_group) { long wgId = routineLoadJob.getWorkloadId(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java index ac4a548c62f897..e12b215a0e4615 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java @@ -781,18 +781,6 @@ public void processTimeoutTasks() { // and after renew, the previous task is removed from routineLoadTaskInfoList, // so task can no longer be committed successfully. // the already committed task will not be handled here. - int timeoutBackOffCount = routineLoadTaskInfo.getTimeoutBackOffCount(); - if (timeoutBackOffCount > RoutineLoadTaskInfo.MAX_TIMEOUT_BACK_OFF_COUNT) { - try { - updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.TIMEOUT_TOO_MUCH, - "task " + routineLoadTaskInfo.getId() + " timeout too much"), false); - } catch (UserException e) { - LOG.warn("update job state to pause failed", e); - } - return; - } - routineLoadTaskInfo.setTimeoutBackOffCount(timeoutBackOffCount + 1); - routineLoadTaskInfo.setTimeoutMs((routineLoadTaskInfo.getTimeoutMs() << 1)); RoutineLoadTaskInfo newTask = unprotectRenewTask(routineLoadTaskInfo); Env.getCurrentEnv().getRoutineLoadTaskScheduler().addTaskInQueue(newTask); } @@ -1526,7 +1514,7 @@ public void update() throws UserException { .add("msg", "Job need to be rescheduled") .build()); unprotectUpdateProgress(); - executeNeedSchedule(); + unprotectUpdateState(JobState.NEED_SCHEDULE, null, false); } } finally { writeUnlock(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java index 301efe4d9c9604..1ff825d97b9d17 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadTaskInfo.java @@ -73,28 +73,23 @@ public abstract class RoutineLoadTaskInfo { protected boolean isMultiTable = false; - protected static final int MAX_TIMEOUT_BACK_OFF_COUNT = 3; - protected int timeoutBackOffCount = 0; - protected boolean isEof = false; // this status will be set when corresponding transaction's status is changed. // so that user or other logic can know the status of the corresponding txn. 
protected TransactionStatus txnStatus = TransactionStatus.UNKNOWN; - public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, - int timeoutBackOffCount, boolean isMultiTable) { + public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, boolean isMultiTable) { this.id = id; this.jobId = jobId; this.createTimeMs = System.currentTimeMillis(); this.timeoutMs = timeoutMs; - this.timeoutBackOffCount = timeoutBackOffCount; this.isMultiTable = isMultiTable; } - public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, int timeoutBackOffCount, - long previousBeId, boolean isMultiTable) { - this(id, jobId, timeoutMs, timeoutBackOffCount, isMultiTable); + public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, long previousBeId, + boolean isMultiTable) { + this(id, jobId, timeoutMs, isMultiTable); this.previousBeId = previousBeId; } @@ -138,10 +133,6 @@ public void setLastScheduledTime(long lastScheduledTime) { this.lastScheduledTime = lastScheduledTime; } - public void setTimeoutMs(long timeoutMs) { - this.timeoutMs = timeoutMs; - } - public long getTimeoutMs() { return timeoutMs; } @@ -154,14 +145,6 @@ public TransactionStatus getTxnStatus() { return txnStatus; } - public void setTimeoutBackOffCount(int timeoutBackOffCount) { - this.timeoutBackOffCount = timeoutBackOffCount; - } - - public int getTimeoutBackOffCount() { - return timeoutBackOffCount; - } - public boolean getIsEof() { return isEof; } @@ -173,33 +156,17 @@ public boolean isTimeout() { } if (isRunning() && System.currentTimeMillis() - executeStartTimeMs > timeoutMs) { - LOG.info("task {} is timeout. start: {}, timeout: {}, timeoutBackOffCount: {}", DebugUtil.printId(id), - executeStartTimeMs, timeoutMs, timeoutBackOffCount); + LOG.info("task {} is timeout. start: {}, timeout: {}", DebugUtil.printId(id), + executeStartTimeMs, timeoutMs); return true; } return false; } public void handleTaskByTxnCommitAttachment(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) { - selfAdaptTimeout(rlTaskTxnCommitAttachment); judgeEof(rlTaskTxnCommitAttachment); } - private void selfAdaptTimeout(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) { - long taskExecutionTime = rlTaskTxnCommitAttachment.getTaskExecutionTimeMs(); - long timeoutMs = this.timeoutMs; - - while (this.timeoutBackOffCount > 0) { - timeoutMs = timeoutMs >> 1; - if (timeoutMs <= taskExecutionTime) { - this.timeoutMs = timeoutMs << 1; - return; - } - this.timeoutBackOffCount--; - } - this.timeoutMs = timeoutMs; - } - private void judgeEof(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) { RoutineLoadJob routineLoadJob = routineLoadManager.getJob(jobId); if (rlTaskTxnCommitAttachment.getTotalRows() < routineLoadJob.getMaxBatchRows() diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java index fbb3aab4ebdcd2..b427f4b62babb8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/MasterImpl.java @@ -30,6 +30,7 @@ import org.apache.doris.catalog.TabletInvertedIndex; import org.apache.doris.catalog.TabletMeta; import org.apache.doris.cloud.catalog.CloudTablet; +import org.apache.doris.common.Config; import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.load.DeleteJob; import org.apache.doris.load.loadv2.SparkLoadJob; @@ -88,11 +89,13 @@ public TMasterResult finishTask(TFinishTaskRequest request) { // check task status // retry task by report process TStatus 
taskStatus = request.getTaskStatus(); + TTaskType taskType = request.getTaskType(); + long signature = request.getSignature(); if (LOG.isDebugEnabled()) { LOG.debug("get task report: {}", request); } - if (taskStatus.getStatusCode() != TStatusCode.OK) { + if (taskStatus.getStatusCode() != TStatusCode.OK && taskType != TTaskType.PUBLISH_VERSION) { LOG.warn("finish task reports bad. request: {}", request); } @@ -111,8 +114,6 @@ public TMasterResult finishTask(TFinishTaskRequest request) { } long backendId = backend.getId(); - TTaskType taskType = request.getTaskType(); - long signature = request.getSignature(); AgentTask task = AgentTaskQueue.getTask(backendId, taskType, signature); if (task == null) { @@ -130,6 +131,13 @@ public TMasterResult finishTask(TFinishTaskRequest request) { } else { if (taskStatus.getStatusCode() != TStatusCode.OK) { task.failed(); + if (taskType == TTaskType.PUBLISH_VERSION) { + boolean needLog = (Config.publish_version_task_failed_log_threshold < 0 + || task.getFailedTimes() <= Config.publish_version_task_failed_log_threshold); + if (needLog) { + LOG.warn("finish task reports bad. request: {}", request); + } + } String errMsg = "task type: " + taskType + ", status_code: " + taskStatus.getStatusCode().toString() + (taskStatus.isSetErrorMsgs() ? (", status_message: " + taskStatus.getErrorMsgs()) : "") + ", backendId: " + backend + ", signature: " + signature; diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java index f7702a495544d2..a4a5273e8ea0bf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java @@ -96,6 +96,7 @@ import com.google.common.collect.Maps; import com.google.common.collect.Queues; import com.google.common.collect.Sets; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -964,10 +965,19 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta objectPool, olapTable.rowStorePageSize(), olapTable.variantEnableFlattenNested()); - createReplicaTask.setIsRecoverTask(true); createReplicaTask.setInvertedIndexFileStorageFormat(olapTable .getInvertedIndexFileStorageFormat()); + if (indexId == olapTable.getBaseIndexId() || olapTable.isShadowIndex(indexId)) { + List clusterKeyIndexes = OlapTable.getClusterKeyIndexes( + indexMeta.getSchema()); + if (!CollectionUtils.isEmpty(clusterKeyIndexes)) { + createReplicaTask.setClusterKeyIndexes(clusterKeyIndexes); + LOG.info("table: {}, partition: {}, index: {}, tablet: {}, " + + "cluster key indexes: {}", tableId, partitionId, indexId, + tabletId, clusterKeyIndexes); + } + } createReplicaBatchTask.addTask(createReplicaTask); } else { // just set this replica as bad diff --git a/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVService.java b/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVService.java index 4b740b75ef8ce7..278811d3a991f7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVService.java @@ -24,6 +24,7 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.DdlException; import org.apache.doris.common.MetaNotFoundException; +import org.apache.doris.event.DropPartitionEvent; import org.apache.doris.event.Event; import 
org.apache.doris.event.EventException; import org.apache.doris.event.EventListener; @@ -177,6 +178,9 @@ public void processEvent(Event event) throws EventException { if (!(event instanceof TableEvent)) { return; } + if (event instanceof DropPartitionEvent && ((DropPartitionEvent) event).isTempPartition()) { + return; + } TableEvent tableEvent = (TableEvent) event; LOG.info("processEvent, Event: {}", event); TableIf table; diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerFactory.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerFactory.java index d4e0400c9ebcea..8d1481aa070d39 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerFactory.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerFactory.java @@ -20,6 +20,14 @@ import java.util.Map; public interface AccessControllerFactory { + /** + * Returns the identifier for the factory, such as "range-doris". + * + * @return the factory identifier + */ + default String factoryIdentifier() { + return this.getClass().getSimpleName(); + } CatalogAccessController createAccessController(Map prop); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerManager.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerManager.java index ba23c91e27df78..bfdf0a7b095bcc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/AccessControllerManager.java @@ -22,12 +22,12 @@ import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.AuthorizationInfo; import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.authorizer.ranger.doris.RangerCacheDorisAccessController; import org.apache.doris.common.Config; import org.apache.doris.common.UserException; import org.apache.doris.datasource.CatalogIf; import org.apache.doris.datasource.ExternalCatalog; import org.apache.doris.datasource.InternalCatalog; +import org.apache.doris.plugin.PropertiesUtils; import org.apache.doris.qe.ConnectContext; import com.google.common.base.Preconditions; @@ -35,11 +35,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.ServiceLoader; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; /** * AccessControllerManager is the entry point of privilege authentication. 
@@ -52,19 +55,51 @@ public class AccessControllerManager { private static final Logger LOG = LogManager.getLogger(AccessControllerManager.class); private Auth auth; + // Default access controller instance used for handling cases where no specific controller is specified private CatalogAccessController defaultAccessController; + // Map that stores the mapping between catalogs and their corresponding access controllers private Map ctlToCtlAccessController = Maps.newConcurrentMap(); + // Cache of loaded access controller factories for quick creation of new access controllers + private ConcurrentHashMap accessControllerFactoriesCache + = new ConcurrentHashMap<>(); + // Mapping between access controller class names and their identifiers for easy lookup of factory identifiers + private ConcurrentHashMap accessControllerClassNameMapping = new ConcurrentHashMap<>(); public AccessControllerManager(Auth auth) { this.auth = auth; - if (Config.access_controller_type.equalsIgnoreCase("ranger-doris")) { - defaultAccessController = new RangerCacheDorisAccessController("doris"); - } else { - defaultAccessController = new InternalAccessController(auth); - } + loadAccessControllerPlugins(); + String accessControllerName = Config.access_controller_type; + this.defaultAccessController = loadAccessControllerOrThrow(accessControllerName); ctlToCtlAccessController.put(InternalCatalog.INTERNAL_CATALOG_NAME, defaultAccessController); } + private CatalogAccessController loadAccessControllerOrThrow(String accessControllerName) { + if (accessControllerName.equalsIgnoreCase("default")) { + return new InternalAccessController(auth); + } + if (accessControllerFactoriesCache.containsKey(accessControllerName)) { + Map prop; + try { + prop = PropertiesUtils.loadAccessControllerPropertiesOrNull(); + } catch (IOException e) { + throw new RuntimeException("Failed to load authorization properties." + + "Please check the configuration file, authorization name is " + accessControllerName, e); + } + return accessControllerFactoriesCache.get(accessControllerName).createAccessController(prop); + } + throw new RuntimeException("No authorization plugin factory found for " + accessControllerName + + ". 
Please confirm that your plugin is placed in the correct location."); + } + + private void loadAccessControllerPlugins() { + ServiceLoader loader = ServiceLoader.load(AccessControllerFactory.class); + for (AccessControllerFactory factory : loader) { + LOG.info("Found Access Controller Plugin Factory: {}", factory.factoryIdentifier()); + accessControllerFactoriesCache.put(factory.factoryIdentifier(), factory); + accessControllerClassNameMapping.put(factory.getClass().getName(), factory.factoryIdentifier()); + } + } + public CatalogAccessController getAccessControllerOrDefault(String ctl) { CatalogAccessController catalogAccessController = ctlToCtlAccessController.get(ctl); if (catalogAccessController != null) { @@ -94,23 +129,28 @@ public boolean checkIfAccessControllerExist(String ctl) { } public void createAccessController(String ctl, String acFactoryClassName, Map prop, - boolean isDryRun) { - Class factoryClazz = null; - try { - factoryClazz = Class.forName(acFactoryClassName); - AccessControllerFactory factory = (AccessControllerFactory) factoryClazz.newInstance(); - CatalogAccessController accessController = factory.createAccessController(prop); - if (!isDryRun) { - ctlToCtlAccessController.put(ctl, accessController); - LOG.info("create access controller {} for catalog {}", ctl, acFactoryClassName); - } - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); - } catch (InstantiationException e) { - throw new RuntimeException(e); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); + boolean isDryRun) { + String pluginIdentifier = getPluginIdentifierForAccessController(acFactoryClassName); + CatalogAccessController accessController = accessControllerFactoriesCache.get(pluginIdentifier) + .createAccessController(prop); + if (!isDryRun) { + ctlToCtlAccessController.put(ctl, accessController); + LOG.info("create access controller {} for catalog {}", acFactoryClassName, ctl); + } + } + + private String getPluginIdentifierForAccessController(String acClassName) { + String pluginIdentifier = null; + if (accessControllerClassNameMapping.containsKey(acClassName)) { + pluginIdentifier = accessControllerClassNameMapping.get(acClassName); + } + if (accessControllerFactoriesCache.containsKey(acClassName)) { + pluginIdentifier = acClassName; } + if (null == pluginIdentifier || !accessControllerFactoriesCache.containsKey(pluginIdentifier)) { + throw new RuntimeException("Access Controller Plugin Factory not found for " + acClassName); + } + return pluginIdentifier; } public void removeAccessController(String ctl) { @@ -160,7 +200,7 @@ public boolean checkTblPriv(ConnectContext ctx, TableName tableName, PrivPredica } public boolean checkTblPriv(ConnectContext ctx, String qualifiedCtl, - String qualifiedDb, String tbl, PrivPredicate wanted) { + String qualifiedDb, String tbl, PrivPredicate wanted) { if (ctx.isSkipAuth()) { return true; } @@ -173,9 +213,18 @@ public boolean checkTblPriv(UserIdentity currentUser, String ctl, String db, Str } // ==== Column ==== + // If param has ctx, we can skip auth by isSkipAuth field in ctx + public void checkColumnsPriv(ConnectContext ctx, String ctl, String qualifiedDb, String tbl, Set cols, + PrivPredicate wanted) throws UserException { + if (ctx.isSkipAuth()) { + return; + } + checkColumnsPriv(ctx.getCurrentUserIdentity(), ctl, qualifiedDb, tbl, cols, wanted); + } + public void checkColumnsPriv(UserIdentity currentUser, String ctl, String qualifiedDb, String tbl, Set cols, - PrivPredicate wanted) throws UserException { + 
PrivPredicate wanted) throws UserException { boolean hasGlobal = checkGlobalPriv(currentUser, wanted); CatalogAccessController accessController = getAccessControllerOrDefault(ctl); accessController.checkColsPriv(hasGlobal, currentUser, ctl, qualifiedDb, @@ -198,7 +247,7 @@ public boolean checkCloudPriv(ConnectContext ctx, String cloudName, PrivPredicat } public boolean checkCloudPriv(UserIdentity currentUser, String cloudName, - PrivPredicate wanted, ResourceTypeEnum type) { + PrivPredicate wanted, ResourceTypeEnum type) { return defaultAccessController.checkCloudPriv(currentUser, cloudName, wanted, type); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java index 36070d20173271..6642b28424362b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java @@ -1436,6 +1436,13 @@ private void getUserAuthInfo(List> userAuthInfos, UserIdentity user userAuthInfo.add(Joiner.on("; ").join(workloadGroupPrivs)); } + // compute groups + if (cloudClusterPrivs.isEmpty()) { + userAuthInfo.add(FeConstants.null_string); + } else { + userAuthInfo.add(Joiner.on("; ").join(cloudClusterPrivs)); + } + userAuthInfos.add(userAuthInfo); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RangerDorisAccessControllerFactory.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RangerDorisAccessControllerFactory.java new file mode 100644 index 00000000000000..297fe5c708c434 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RangerDorisAccessControllerFactory.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.mysql.privilege; + +import org.apache.doris.catalog.authorizer.ranger.doris.RangerCacheDorisAccessController; + +import java.util.Map; + +public class RangerDorisAccessControllerFactory implements AccessControllerFactory { + @Override + public String factoryIdentifier() { + return "ranger-doris"; + } + + @Override + public RangerCacheDorisAccessController createAccessController(Map prop) { + return new RangerCacheDorisAccessController("doris"); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java index 6b215982c7d926..f1db573eac5073 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/RoleManager.java @@ -199,8 +199,10 @@ public void getRoleInfo(List> results) { }, (s1, s2) -> s1 + " " + s2 )); + // METADATA in ShowRolesStmt, the 2nd CLUSTER is for compute group. Stream.of(PrivLevel.GLOBAL, PrivLevel.CATALOG, PrivLevel.DATABASE, PrivLevel.TABLE, PrivLevel.RESOURCE, - PrivLevel.CLUSTER, PrivLevel.STAGE, PrivLevel.STORAGE_VAULT, PrivLevel.WORKLOAD_GROUP) + PrivLevel.CLUSTER, PrivLevel.STAGE, PrivLevel.STORAGE_VAULT, PrivLevel.WORKLOAD_GROUP, + PrivLevel.CLUSTER) .forEach(level -> { String infoItem = infoMap.get(level); if (Strings.isNullOrEmpty(infoItem)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java index ccc21b58660b35..176e0f25801043 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java @@ -84,6 +84,7 @@ public class UserProperty implements Writable { public static final String PROP_WORKLOAD_GROUP = "default_workload_group"; public static final String DEFAULT_CLOUD_CLUSTER = "default_cloud_cluster"; + public static final String DEFAULT_COMPUTE_GROUP = "default_compute_group"; // for system user public static final Set ADVANCED_PROPERTIES = Sets.newHashSet(); @@ -142,6 +143,7 @@ public class UserProperty implements Writable { Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + PROP_WORKLOAD_GROUP + "$", Pattern.CASE_INSENSITIVE)); COMMON_PROPERTIES.add(Pattern.compile("^" + DEFAULT_CLOUD_CLUSTER + "$", Pattern.CASE_INSENSITIVE)); + COMMON_PROPERTIES.add(Pattern.compile("^" + DEFAULT_COMPUTE_GROUP + "$", Pattern.CASE_INSENSITIVE)); } public UserProperty() { @@ -263,6 +265,15 @@ public void update(List> properties, boolean isReplay) thro value = ""; } newDefaultCloudCluster = value; + } else if (keyArr[0].equalsIgnoreCase(DEFAULT_COMPUTE_GROUP)) { + // set property "DEFAULT_CLOUD_CLUSTER" = "cluster1" + if (keyArr.length != 1) { + throw new DdlException(DEFAULT_COMPUTE_GROUP + " format error"); + } + if (value == null) { + value = ""; + } + newDefaultCloudCluster = value; } else if (keyArr[0].equalsIgnoreCase(PROP_MAX_QUERY_INSTANCES)) { // set property "max_query_instances" = "1000" if (keyArr.length != 1) { @@ -536,6 +547,13 @@ public List> fetchProperty() { result.add(Lists.newArrayList(DEFAULT_CLOUD_CLUSTER, "")); } + // default cloud cluster + if (defaultCloudCluster != null) { + result.add(Lists.newArrayList(DEFAULT_COMPUTE_GROUP, defaultCloudCluster)); + } else { + result.add(Lists.newArrayList(DEFAULT_COMPUTE_GROUP, "")); + } + for (Map.Entry entry : clusterToDppConfig.entrySet()) { 
String cluster = entry.getKey(); DppConfig dppConfig = entry.getValue(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java index bd74c5835e287f..b51f9ce24a3e4d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java @@ -22,6 +22,7 @@ import org.apache.doris.analysis.StatementBase; import org.apache.doris.catalog.Column; import org.apache.doris.catalog.TableIf; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.FormatOptions; import org.apache.doris.common.NereidsException; import org.apache.doris.common.Pair; @@ -48,6 +49,7 @@ import org.apache.doris.nereids.processor.pre.PlanPreprocessors; import org.apache.doris.nereids.properties.PhysicalProperties; import org.apache.doris.nereids.rules.exploration.mv.MaterializationContext; +import org.apache.doris.nereids.stats.StatsCalculator; import org.apache.doris.nereids.trees.expressions.NamedExpression; import org.apache.doris.nereids.trees.expressions.SlotReference; import org.apache.doris.nereids.trees.plans.ComputeResultSet; @@ -56,6 +58,7 @@ import org.apache.doris.nereids.trees.plans.distribute.DistributePlanner; import org.apache.doris.nereids.trees.plans.distribute.DistributedPlan; import org.apache.doris.nereids.trees.plans.distribute.FragmentIdMapping; +import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; import org.apache.doris.nereids.trees.plans.logical.LogicalSqlCache; import org.apache.doris.nereids.trees.plans.physical.PhysicalPlan; @@ -256,6 +259,17 @@ private Plan planWithoutLock( } } + // if we cannot get table row count, skip join reorder + // except: + // 1. user set leading hint + // 2. ut test. 
In ut test, FeConstants.enableInternalSchemaDb is false or FeConstants.runningUnitTest is true + if (FeConstants.enableInternalSchemaDb && !FeConstants.runningUnitTest + && !cascadesContext.isLeadingDisableJoinReorder()) { + List scans = cascadesContext.getRewritePlan() + .collectToList(LogicalOlapScan.class::isInstance); + StatsCalculator.disableJoinReorderIfTableRowCountNotAvailable(scans, cascadesContext); + } + optimize(); if (statementContext.getConnectContext().getExecutor() != null) { statementContext.getConnectContext().getExecutor().getSummaryProfile().setNereidsOptimizeTime(); @@ -673,7 +687,7 @@ public Optional handleQueryInFe(StatementBase parsedStmt) { if (physicalPlan instanceof ComputeResultSet) { Optional sqlCacheContext = statementContext.getSqlCacheContext(); Optional resultSet = ((ComputeResultSet) physicalPlan) - .computeResultInFe(cascadesContext, sqlCacheContext); + .computeResultInFe(cascadesContext, sqlCacheContext, physicalPlan.getOutput()); if (resultSet.isPresent()) { return resultSet; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java index 1a56c0fa16076c..0bf767ac880404 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java @@ -361,8 +361,7 @@ public PlanFragment visitPhysicalDistribute(PhysicalDistribute d MultiCastDataSink multiCastDataSink = (MultiCastDataSink) inputFragment.getSink(); DataStreamSink dataStreamSink = multiCastDataSink.getDataStreamSinks().get( multiCastDataSink.getDataStreamSinks().size() - 1); - TupleDescriptor tupleDescriptor = generateTupleDesc(distribute.getOutput(), null, context); - exchangeNode.updateTupleIds(tupleDescriptor); + exchangeNode.updateTupleIds(dataStreamSink.getOutputTupleDesc()); dataStreamSink.setExchNodeId(exchangeNode.getId()); dataStreamSink.setOutputPartition(dataPartition); parentFragment.addChild(inputFragment); @@ -1256,9 +1255,6 @@ public PlanFragment visitPhysicalCTEProducer(PhysicalCTEProducer multiCastPlanFragment.setOutputExprs(outputs); context.getCteProduceFragments().put(cteId, multiCastPlanFragment); context.getCteProduceMap().put(cteId, cteProducer); - if (context.getRuntimeTranslator().isPresent()) { - context.getRuntimeTranslator().get().getContext().getCteProduceMap().put(cteId, cteProducer); - } context.getPlanFragments().add(multiCastPlanFragment); return child; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java index 1ffbac97d741a4..6f6c022117c337 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java @@ -38,6 +38,7 @@ import org.apache.doris.nereids.rules.analysis.HavingToFilter; import org.apache.doris.nereids.rules.analysis.LeadingJoin; import org.apache.doris.nereids.rules.analysis.NormalizeAggregate; +import org.apache.doris.nereids.rules.analysis.NormalizeGenerate; import org.apache.doris.nereids.rules.analysis.NormalizeRepeat; import org.apache.doris.nereids.rules.analysis.OneRowRelationExtractAggregate; import org.apache.doris.nereids.rules.analysis.ProjectToGlobalAggregate; @@ -170,6 +171,7 @@ private static List buildAnalyzerJobs(Optional new 
CollectJoinConstraint() ), topDown(new LeadingJoin()), + bottomUp(new NormalizeGenerate()), bottomUp(new SubqueryToApply()), topDown(new MergeProjects()) ); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java index 4ab4165a446733..9c3d3ebecbad8c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java @@ -188,7 +188,12 @@ public class Rewriter extends AbstractBatchJobExecutor { // after doing NormalizeAggregate in analysis job // we need run the following 2 rules to make AGG_SCALAR_SUBQUERY_TO_WINDOW_FUNCTION work bottomUp(new PullUpProjectUnderApply()), - topDown(new PushDownFilterThroughProject()), + topDown( + new PushDownFilterThroughProject(), + // the subquery may have where and having clause + // so there may be two filters we need to merge them + new MergeFilters() + ), custom(RuleType.AGG_SCALAR_SUBQUERY_TO_WINDOW_FUNCTION, AggScalarSubQueryToWindowFunction::new), bottomUp( diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/scheduler/SimpleJobScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/scheduler/SimpleJobScheduler.java index 32a82127e6dbe2..e31c92e3fe004c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/scheduler/SimpleJobScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/scheduler/SimpleJobScheduler.java @@ -18,7 +18,7 @@ package org.apache.doris.nereids.jobs.scheduler; import org.apache.doris.nereids.CascadesContext; -import org.apache.doris.nereids.exceptions.DoNotFallbackException; +import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.jobs.Job; import org.apache.doris.qe.SessionVariable; @@ -37,7 +37,7 @@ public void executeJobPool(ScheduleContext scheduleContext) { if (sessionVariable.enableNereidsTimeout && context.getStatementContext().getStopwatch().elapsed(TimeUnit.MILLISECONDS) > sessionVariable.nereidsTimeoutSecond * 1000L) { - throw new DoNotFallbackException( + throw new AnalysisException( "Nereids cost too much time ( > " + sessionVariable.nereidsTimeoutSecond + "s )"); } Job job = pool.pop(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/GroupExpression.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/GroupExpression.java index f6450bca07341b..822c6bb6f1d176 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/GroupExpression.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/GroupExpression.java @@ -276,7 +276,27 @@ public void mergeToNotOwnerRemove(GroupExpression target) { this.getLowestCostTable() .forEach((properties, pair) -> target.updateLowestCostTable(properties, pair.second, pair.first)); // requestPropertiesMap - target.requestPropertiesMap.putAll(this.requestPropertiesMap); + // ATTN: when do merge, we should update target requestPropertiesMap + // ONLY IF the cost of source's request property lower than target one. + // Otherwise, the requestPropertiesMap will not sync with lowestCostTable. + // Then, we will get wrong output property when get the final plan. 
+ for (Map.Entry entry : requestPropertiesMap.entrySet()) { + PhysicalProperties request = entry.getKey(); + if (!target.requestPropertiesMap.containsKey(request)) { + target.requestPropertiesMap.put(entry.getKey(), entry.getValue()); + } else { + PhysicalProperties sourceOutput = entry.getValue(); + PhysicalProperties targetOutput = target.getRequestPropertiesMap().get(request); + if (this.getLowestCostTable().containsKey(sourceOutput) + && target.getLowestCostTable().containsKey(targetOutput)) { + Cost sourceCost = this.getLowestCostTable().get(sourceOutput).first; + Cost targetCost = target.getLowestCostTable().get(targetOutput).first; + if (sourceCost.getValue() < targetCost.getValue()) { + target.requestPropertiesMap.put(entry.getKey(), entry.getValue()); + } + } + } + } // ruleMasks target.ruleMasks.or(this.ruleMasks); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java index 36ee590c0e946a..3e5ba15b0f7722 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java @@ -2612,7 +2612,11 @@ public LogicalPlan visitCreateView(CreateViewContext ctx) { String comment = ctx.STRING_LITERAL() == null ? "" : LogicalPlanBuilderAssistant.escapeBackSlash( ctx.STRING_LITERAL().getText().substring(1, ctx.STRING_LITERAL().getText().length() - 1)); String querySql = getOriginSql(ctx.query()); - CreateViewInfo info = new CreateViewInfo(ctx.EXISTS() != null, new TableNameInfo(nameParts), + if (ctx.REPLACE() != null && ctx.EXISTS() != null) { + throw new AnalysisException("[OR REPLACE] and [IF NOT EXISTS] cannot used at the same time"); + } + CreateViewInfo info = new CreateViewInfo(ctx.EXISTS() != null, ctx.REPLACE() != null, + new TableNameInfo(nameParts), comment, querySql, ctx.cols == null ? 
Lists.newArrayList() : visitSimpleColumnDefs(ctx.cols)); return new CreateViewCommand(info); @@ -2789,6 +2793,8 @@ public ColumnDefinition visitColumnDef(ColumnDefContext ctx) { defaultValue = Optional.of(DefaultValue.PI_DEFAULT_VALUE); } else if (ctx.E() != null) { defaultValue = Optional.of(DefaultValue.E_NUM_DEFAULT_VALUE); + } else if (ctx.BITMAP_EMPTY() != null) { + defaultValue = Optional.of(DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE); } } if (ctx.UPDATE() != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/PushDownFilterThroughProject.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/PushDownFilterThroughProject.java index 864e817dc1f4de..671abc2c490a3b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/PushDownFilterThroughProject.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/PushDownFilterThroughProject.java @@ -32,7 +32,8 @@ public class PushDownFilterThroughProject extends PlanPostProcessor { public Plan visitPhysicalFilter(PhysicalFilter filter, CascadesContext context) { filter = (PhysicalFilter) super.visit(filter, context); Plan child = filter.child(); - if (!(child instanceof PhysicalProject)) { + // don't push down filter if child project contains NoneMovableFunction + if (!(child instanceof PhysicalProject) || ((PhysicalProject) child).containsNoneMovableFunction()) { return filter; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterContext.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterContext.java index 746bb05e9fd191..04e254f549aa64 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterContext.java @@ -19,7 +19,6 @@ import org.apache.doris.analysis.SlotRef; import org.apache.doris.common.Pair; -import org.apache.doris.nereids.trees.expressions.CTEId; import org.apache.doris.nereids.trees.expressions.EqualPredicate; import org.apache.doris.nereids.trees.expressions.ExprId; import org.apache.doris.nereids.trees.expressions.Expression; @@ -27,7 +26,6 @@ import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.physical.AbstractPhysicalJoin; -import org.apache.doris.nereids.trees.plans.physical.PhysicalCTEProducer; import org.apache.doris.nereids.trees.plans.physical.PhysicalHashJoin; import org.apache.doris.nereids.trees.plans.physical.PhysicalRelation; import org.apache.doris.nereids.trees.plans.physical.RuntimeFilter; @@ -118,11 +116,6 @@ public boolean equals(Object other) { private final Map effectiveSrcNodes = Maps.newHashMap(); - private final Map cteProducerMap = Maps.newLinkedHashMap(); - - // cte whose runtime filter has been extracted - private final Set processedCTE = Sets.newHashSet(); - private final SessionVariable sessionVariable; private final FilterSizeLimits limits; @@ -152,6 +145,7 @@ public ExpandRF(AbstractPhysicalJoin buildNode, PhysicalRelation srcNode, this.srcNode = srcNode; this.target1 = target1; this.target2 = target2; + this.equal = equal; } } @@ -160,10 +154,6 @@ public RuntimeFilterContext(SessionVariable sessionVariable) { this.limits = new FilterSizeLimits(sessionVariable); } - public void setRelationsUsedByPlan(Plan plan, Set relations) { - relationsUsedByPlan.put(plan, relations); - } - /** * return true, if the relation is in 
the subtree */ @@ -185,14 +175,6 @@ public FilterSizeLimits getLimits() { return limits; } - public Map getCteProduceMap() { - return cteProducerMap; - } - - public Set getProcessedCTE() { - return processedCTE; - } - public void setTargetExprIdToFilter(ExprId id, RuntimeFilter filter) { Preconditions.checkArgument(filter.getTargetSlots().stream().anyMatch(expr -> expr.getExprId() == id)); this.targetExprIdToFilter.computeIfAbsent(id, k -> Lists.newArrayList()).add(filter); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java index 1192a66069716a..3cff9dfd001901 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/RuntimeFilterGenerator.java @@ -101,132 +101,131 @@ public class RuntimeFilterGenerator extends PlanPostProcessor { @Override public Plan processRoot(Plan plan, CascadesContext ctx) { Plan result = plan.accept(this, ctx); - // cte rf + // try to push rf inside CTEProducer + // collect cteProducers RuntimeFilterContext rfCtx = ctx.getRuntimeFilterContext(); - int cteCount = rfCtx.getProcessedCTE().size(); - if (cteCount != 0) { - Map> cteIdToConsumersWithRF = Maps.newHashMap(); - Map> cteToRFsMap = Maps.newHashMap(); - Map> consumerToRFs = Maps.newHashMap(); - Map> consumerToSrcExpression = Maps.newHashMap(); - List allRFs = rfCtx.getNereidsRuntimeFilter(); - for (RuntimeFilter rf : allRFs) { - for (PhysicalRelation rel : rf.getTargetScans()) { - if (rel instanceof PhysicalCTEConsumer) { - PhysicalCTEConsumer consumer = (PhysicalCTEConsumer) rel; - CTEId cteId = consumer.getCteId(); - cteToRFsMap.computeIfAbsent(cteId, key -> Lists.newArrayList()).add(rf); - cteIdToConsumersWithRF.computeIfAbsent(cteId, key -> Sets.newHashSet()).add(consumer); - consumerToRFs.computeIfAbsent(consumer, key -> Sets.newHashSet()).add(rf); - consumerToSrcExpression.computeIfAbsent(consumer, key -> Sets.newHashSet()) - .add(rf.getSrcExpr()); - } + Map cteProducerMap = plan.collect(PhysicalCTEProducer.class::isInstance) + .stream().collect(Collectors.toMap(p -> ((PhysicalCTEProducer) p).getCteId(), + p -> (PhysicalCTEProducer) p)); + // collect cteConsumers which are RF targets + Map> cteIdToConsumersWithRF = Maps.newHashMap(); + Map> consumerToRFs = Maps.newHashMap(); + Map> consumerToSrcExpression = Maps.newHashMap(); + List allRFs = rfCtx.getNereidsRuntimeFilter(); + for (RuntimeFilter rf : allRFs) { + for (PhysicalRelation rel : rf.getTargetScans()) { + if (rel instanceof PhysicalCTEConsumer) { + PhysicalCTEConsumer consumer = (PhysicalCTEConsumer) rel; + CTEId cteId = consumer.getCteId(); + cteIdToConsumersWithRF.computeIfAbsent(cteId, key -> Sets.newHashSet()).add(consumer); + consumerToRFs.computeIfAbsent(consumer, key -> Sets.newHashSet()).add(rf); + consumerToSrcExpression.computeIfAbsent(consumer, key -> Sets.newHashSet()) + .add(rf.getSrcExpr()); } } - for (CTEId cteId : rfCtx.getCteProduceMap().keySet()) { - // if any consumer does not have RF, RF cannot be pushed down. - // cteIdToConsumersWithRF.get(cteId).size() can not be 1, o.w. this cte will be inlined. 
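// Illustrative sketch of the qualifying shape (table and column names are hypothetical):
//   WITH cte AS (SELECT k FROM fact)
//   SELECT * FROM dim d JOIN cte c1 ON d.k = c1.k JOIN cte c2 ON d.k = c2.k
// both consumers c1 and c2 are RF targets and their RFs share the same srcExpr (d.k),
// so one RF built from that expression can be pushed down into the producer of the cte.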
- if (cteIdToConsumersWithRF.get(cteId) != null - && ctx.getCteIdToConsumers().get(cteId).size() == cteIdToConsumersWithRF.get(cteId).size() - && cteIdToConsumersWithRF.get(cteId).size() >= 2) { - // check if there is a common srcExpr among all the consumers - Set consumers = cteIdToConsumersWithRF.get(cteId); - PhysicalCTEConsumer consumer0 = consumers.iterator().next(); - Set candidateSrcExpressions = consumerToSrcExpression.get(consumer0); - for (PhysicalCTEConsumer currentConsumer : consumers) { - Set srcExpressionsOnCurrentConsumer = consumerToSrcExpression.get(currentConsumer); - candidateSrcExpressions.retainAll(srcExpressionsOnCurrentConsumer); - if (candidateSrcExpressions.isEmpty()) { - break; - } + } + for (CTEId cteId : cteIdToConsumersWithRF.keySet()) { + // if any consumer does not have RF, RF cannot be pushed down. + // cteIdToConsumersWithRF.get(cteId).size() can not be 1, o.w. this cte will be inlined. + if (ctx.getCteIdToConsumers().get(cteId).size() == cteIdToConsumersWithRF.get(cteId).size() + && cteIdToConsumersWithRF.get(cteId).size() >= 2) { + // check if there is a common srcExpr among all the consumers + Set consumers = cteIdToConsumersWithRF.get(cteId); + PhysicalCTEConsumer consumer0 = consumers.iterator().next(); + Set candidateSrcExpressions = consumerToSrcExpression.get(consumer0); + for (PhysicalCTEConsumer currentConsumer : consumers) { + Set srcExpressionsOnCurrentConsumer = consumerToSrcExpression.get(currentConsumer); + candidateSrcExpressions.retainAll(srcExpressionsOnCurrentConsumer); + if (candidateSrcExpressions.isEmpty()) { + break; } - if (!candidateSrcExpressions.isEmpty()) { - // find RFs to push down - for (Expression srcExpr : candidateSrcExpressions) { - List rfsToPushDown = Lists.newArrayList(); - for (PhysicalCTEConsumer consumer : cteIdToConsumersWithRF.get(cteId)) { - for (RuntimeFilter rf : consumerToRFs.get(consumer)) { - if (rf.getSrcExpr().equals(srcExpr)) { - rfsToPushDown.add(rf); - } + } + if (!candidateSrcExpressions.isEmpty()) { + // find RFs to push down + for (Expression srcExpr : candidateSrcExpressions) { + List rfsToPushDown = Lists.newArrayList(); + for (PhysicalCTEConsumer consumer : cteIdToConsumersWithRF.get(cteId)) { + for (RuntimeFilter rf : consumerToRFs.get(consumer)) { + if (rf.getSrcExpr().equals(srcExpr)) { + rfsToPushDown.add(rf); } } - if (rfsToPushDown.isEmpty()) { - break; - } + } + if (rfsToPushDown.isEmpty()) { + break; + } - // the most right deep buildNode from rfsToPushDown is used as buildNode for pushDown rf - // since the srcExpr are the same, all buildNodes of rfToPushDown are in the same tree path - // the longest ancestors means its corresponding rf build node is the most right deep one. 
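// Illustrative sketch (hypothetical plan shape): if the build nodes of rfsToPushDown lie on one
// path JoinA -> JoinB -> JoinC with JoinC the deepest, the RF built at JoinC has the longest
// ancestor list and is chosen as the right-deep RF, while the RF with the shortest ancestor
// list (built at JoinA) becomes leftTop; only the nodes between them are validated below.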
- List rightDeepRfs = Lists.newArrayList(); - List rightDeepAncestors = rfsToPushDown.get(0).getBuilderNode().getAncestors(); - int rightDeepAncestorsSize = rightDeepAncestors.size(); - RuntimeFilter leftTop = rfsToPushDown.get(0); - int leftTopAncestorsSize = rightDeepAncestorsSize; - for (RuntimeFilter rf : rfsToPushDown) { - List ancestors = rf.getBuilderNode().getAncestors(); - int currentAncestorsSize = ancestors.size(); - if (currentAncestorsSize >= rightDeepAncestorsSize) { - if (currentAncestorsSize == rightDeepAncestorsSize) { - rightDeepRfs.add(rf); - } else { - rightDeepAncestorsSize = currentAncestorsSize; - rightDeepAncestors = ancestors; - rightDeepRfs.clear(); - rightDeepRfs.add(rf); - } - } - if (currentAncestorsSize < leftTopAncestorsSize) { - leftTopAncestorsSize = currentAncestorsSize; - leftTop = rf; + // the most right deep buildNode from rfsToPushDown is used as buildNode for pushDown rf + // since the srcExpr are the same, all buildNodes of rfToPushDown are in the same tree path + // the longest ancestors means its corresponding rf build node is the most right deep one. + List rightDeepRfs = Lists.newArrayList(); + List rightDeepAncestors = rfsToPushDown.get(0).getBuilderNode().getAncestors(); + int rightDeepAncestorsSize = rightDeepAncestors.size(); + RuntimeFilter leftTop = rfsToPushDown.get(0); + int leftTopAncestorsSize = rightDeepAncestorsSize; + for (RuntimeFilter rf : rfsToPushDown) { + List ancestors = rf.getBuilderNode().getAncestors(); + int currentAncestorsSize = ancestors.size(); + if (currentAncestorsSize >= rightDeepAncestorsSize) { + if (currentAncestorsSize == rightDeepAncestorsSize) { + rightDeepRfs.add(rf); + } else { + rightDeepAncestorsSize = currentAncestorsSize; + rightDeepAncestors = ancestors; + rightDeepRfs.clear(); + rightDeepRfs.add(rf); } } - Preconditions.checkArgument(rightDeepAncestors.contains(leftTop.getBuilderNode())); - // check nodes between right deep and left top are SPJ and not denied join and not mark join - boolean valid = true; - for (Plan cursor : rightDeepAncestors) { - if (cursor.equals(leftTop.getBuilderNode())) { - break; - } - // valid = valid && SPJ_PLAN.contains(cursor.getClass()); - if (cursor instanceof AbstractPhysicalJoin) { - AbstractPhysicalJoin cursorJoin = (AbstractPhysicalJoin) cursor; - valid = (!RuntimeFilterGenerator.DENIED_JOIN_TYPES - .contains(cursorJoin.getJoinType()) - || cursorJoin.isMarkJoin()) && valid; - } - if (!valid) { - break; - } + if (currentAncestorsSize < leftTopAncestorsSize) { + leftTopAncestorsSize = currentAncestorsSize; + leftTop = rf; + } + } + Preconditions.checkArgument(rightDeepAncestors.contains(leftTop.getBuilderNode())); + // check nodes between right deep and left top are SPJ and not denied join and not mark join + boolean valid = true; + for (Plan cursor : rightDeepAncestors) { + if (cursor.equals(leftTop.getBuilderNode())) { + break; + } + // valid = valid && SPJ_PLAN.contains(cursor.getClass()); + if (cursor instanceof AbstractPhysicalJoin) { + AbstractPhysicalJoin cursorJoin = (AbstractPhysicalJoin) cursor; + valid = (!RuntimeFilterGenerator.DENIED_JOIN_TYPES + .contains(cursorJoin.getJoinType()) + || cursorJoin.isMarkJoin()) && valid; } - if (!valid) { break; } + } + + if (!valid) { + break; + } - for (RuntimeFilter rfToPush : rightDeepRfs) { - Expression rightDeepTargetExpressionOnCTE = null; - int targetCount = rfToPush.getTargetExpressions().size(); - for (int i = 0; i < targetCount; i++) { - PhysicalRelation rel = rfToPush.getTargetScans().get(i); - if (rel instanceof 
PhysicalCTEConsumer - && ((PhysicalCTEConsumer) rel).getCteId().equals(cteId)) { - rightDeepTargetExpressionOnCTE = rfToPush.getTargetExpressions().get(i); - break; - } + for (RuntimeFilter rfToPush : rightDeepRfs) { + Expression rightDeepTargetExpressionOnCTE = null; + int targetCount = rfToPush.getTargetExpressions().size(); + for (int i = 0; i < targetCount; i++) { + PhysicalRelation rel = rfToPush.getTargetScans().get(i); + if (rel instanceof PhysicalCTEConsumer + && ((PhysicalCTEConsumer) rel).getCteId().equals(cteId)) { + rightDeepTargetExpressionOnCTE = rfToPush.getTargetExpressions().get(i); + break; } + } - boolean pushedDown = doPushDownIntoCTEProducerInternal( + boolean pushedDown = doPushDownIntoCTEProducerInternal( + rfToPush, + rightDeepTargetExpressionOnCTE, + rfCtx, + cteProducerMap.get(cteId) + ); + if (pushedDown) { + rfCtx.removeFilter( rfToPush, - rightDeepTargetExpressionOnCTE, - rfCtx, - rfCtx.getCteProduceMap().get(cteId) - ); - if (pushedDown) { - rfCtx.removeFilter( - rfToPush, - rightDeepTargetExpressionOnCTE.getInputSlotExprIds().iterator().next()); - } + rightDeepTargetExpressionOnCTE.getInputSlotExprIds().iterator().next()); } } } @@ -265,8 +264,8 @@ public PhysicalPlan visitPhysicalHashJoin(PhysicalHashJoin (type.getValue() & ctx.getSessionVariable().getRuntimeFilterType()) > 0) .collect(Collectors.toList()); - List hashJoinConjuncts = join.getHashJoinConjuncts().stream().collect(Collectors.toList()); - boolean buildSideContainsConsumer = hasCTEConsumerDescendant((PhysicalPlan) join.right()); + List hashJoinConjuncts = join.getHashJoinConjuncts(); + for (int i = 0; i < hashJoinConjuncts.size(); i++) { EqualPredicate equalTo = JoinUtils.swapEqualToForChildrenOrder( (EqualPredicate) hashJoinConjuncts.get(i), join.left().getOutputSet()); @@ -277,9 +276,7 @@ public PhysicalPlan visitPhysicalHashJoin(PhysicalHashJoin pair = ctx.getAliasTransferMap().get(equalTo.right()); - // CteConsumer is not allowed to generate RF in order to avoid RF cycle. - if ((pair == null && buildSideContainsConsumer) - || (pair != null && pair.first instanceof PhysicalCTEConsumer)) { + if (pair == null) { continue; } if (equalTo.left().getInputSlots().size() == 1) { @@ -306,20 +303,6 @@ public PhysicalCTEConsumer visitPhysicalCTEConsumer(PhysicalCTEConsumer scan, Ca return scan; } - @Override - public PhysicalCTEProducer visitPhysicalCTEProducer(PhysicalCTEProducer producer, - CascadesContext context) { - CTEId cteId = producer.getCteId(); - context.getRuntimeFilterContext().getCteProduceMap().put(cteId, producer); - Set processedCTE = context.getRuntimeFilterContext().getProcessedCTE(); - if (!processedCTE.contains(cteId)) { - PhysicalPlan inputPlanNode = (PhysicalPlan) producer.child(0); - inputPlanNode.accept(this, context); - processedCTE.add(cteId); - } - return producer; - } - private void generateBitMapRuntimeFilterForNLJ(PhysicalNestedLoopJoin join, RuntimeFilterContext ctx) { if (join.getJoinType() != JoinType.LEFT_SEMI_JOIN && join.getJoinType() != JoinType.CROSS_JOIN) { @@ -680,38 +663,4 @@ public static void getAllScanInfo(Plan root, Set scans) { } } } - - /** - * Check whether plan root contains cte consumer descendant. 
- */ - public static boolean hasCTEConsumerDescendant(PhysicalPlan root) { - if (root instanceof PhysicalCTEConsumer) { - return true; - } else if (root.children().size() == 1) { - return hasCTEConsumerDescendant((PhysicalPlan) root.child(0)); - } else { - for (Object child : root.children()) { - if (hasCTEConsumerDescendant((PhysicalPlan) child)) { - return true; - } - } - return false; - } - } - - /** - * Check whether runtime filter target is remote or local - */ - public static boolean hasRemoteTarget(AbstractPlan join, AbstractPlan scan) { - if (scan instanceof PhysicalCTEConsumer) { - return true; - } else { - Preconditions.checkArgument(join.getMutableState(AbstractPlan.FRAGMENT_ID).isPresent(), - "cannot find fragment id for Join node"); - Preconditions.checkArgument(scan.getMutableState(AbstractPlan.FRAGMENT_ID).isPresent(), - "cannot find fragment id for scan node"); - return join.getMutableState(AbstractPlan.FRAGMENT_ID).get() - != scan.getMutableState(AbstractPlan.FRAGMENT_ID).get(); - } - } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/Validator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/Validator.java index 4504b92fc7f841..62881a463d1bc4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/Validator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/Validator.java @@ -61,7 +61,8 @@ public Plan visitPhysicalFilter(PhysicalFilter filter, CascadesC Plan child = filter.child(); // Forbidden filter-project, we must make filter-project -> project-filter. - if (child instanceof PhysicalProject) { + // except that the project contains NoneMovableFunction + if (child instanceof PhysicalProject && !((PhysicalProject) child).containsNoneMovableFunction()) { throw new AnalysisException( "Nereids generate a filter-project plan, but backend not support:\n" + filter.treeString()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java index d345d9057e9b43..67bdef4ef85d65 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java @@ -100,6 +100,7 @@ public enum RuleType { NORMALIZE_AGGREGATE(RuleTypeClass.REWRITE), NORMALIZE_SORT(RuleTypeClass.REWRITE), NORMALIZE_REPEAT(RuleTypeClass.REWRITE), + NORMALIZE_GENERATE(RuleTypeClass.REWRITE), EXTRACT_AND_NORMALIZE_WINDOW_EXPRESSIONS(RuleTypeClass.REWRITE), SIMPLIFY_WINDOW_EXPRESSION(RuleTypeClass.REWRITE), CHECK_AND_STANDARDIZE_WINDOW_FUNCTION_AND_FRAME(RuleTypeClass.REWRITE), diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/EliminateLogicalSelectHint.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/EliminateLogicalSelectHint.java index b0f55365f332fd..1275c33a565909 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/EliminateLogicalSelectHint.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/EliminateLogicalSelectHint.java @@ -23,7 +23,6 @@ import org.apache.doris.nereids.CascadesContext; import org.apache.doris.nereids.StatementContext; import org.apache.doris.nereids.exceptions.AnalysisException; -import org.apache.doris.nereids.exceptions.MustFallbackException; import org.apache.doris.nereids.hint.Hint; import org.apache.doris.nereids.hint.LeadingHint; import org.apache.doris.nereids.hint.OrderedHint; @@ -108,12 +107,6 @@ private 
void setVar(SelectHintSetVar selectHint, StatementContext context) { } } } - // if sv set enable_nereids_planner=true and hint set enable_nereids_planner=false, we should set - // enable_fallback_to_original_planner=true and revert it after executing. - // throw exception to fall back to original planner - if (!sessionVariable.isEnableNereidsPlanner()) { - throw new MustFallbackException("The nereids is disabled in this sql, fallback to original planner"); - } } private void extractLeading(SelectHintLeading selectHint, CascadesContext context, diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/ExpressionAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/ExpressionAnalyzer.java index 8624ba205c5d7f..49789aa66e1ff8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/ExpressionAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/ExpressionAnalyzer.java @@ -65,15 +65,12 @@ import org.apache.doris.nereids.trees.expressions.WhenClause; import org.apache.doris.nereids.trees.expressions.functions.BoundFunction; import org.apache.doris.nereids.trees.expressions.functions.FunctionBuilder; -import org.apache.doris.nereids.trees.expressions.functions.agg.Count; import org.apache.doris.nereids.trees.expressions.functions.scalar.ElementAt; import org.apache.doris.nereids.trees.expressions.functions.scalar.Lambda; -import org.apache.doris.nereids.trees.expressions.functions.scalar.Nvl; import org.apache.doris.nereids.trees.expressions.functions.udf.AliasUdfBuilder; import org.apache.doris.nereids.trees.expressions.functions.udf.JavaUdaf; import org.apache.doris.nereids.trees.expressions.functions.udf.JavaUdf; import org.apache.doris.nereids.trees.expressions.functions.udf.UdfBuilder; -import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.literal.IntegerLikeLiteral; import org.apache.doris.nereids.trees.expressions.literal.Literal; import org.apache.doris.nereids.trees.expressions.literal.StringLiteral; @@ -425,18 +422,6 @@ public Expression visitUnboundFunction(UnboundFunction unboundFunction, Expressi return buildResult.first; } else { Expression castFunction = TypeCoercionUtils.processBoundFunction((BoundFunction) buildResult.first); - if (castFunction instanceof Count - && context != null - && context.cascadesContext.getOuterScope().isPresent() - && !context.cascadesContext.getOuterScope().get().getCorrelatedSlots().isEmpty()) { - // consider sql: SELECT * FROM t1 WHERE t1.a <= (SELECT COUNT(t2.a) FROM t2 WHERE (t1.b = t2.b)); - // when unnest correlated subquery, we create a left join node. - // outer query is left table and subquery is right one - // if there is no match, the row from right table is filled with nulls - // but COUNT function is always not nullable. 
- // so wrap COUNT with Nvl to ensure it's result is 0 instead of null to get the correct result - castFunction = new Nvl(castFunction, new BigIntLiteral(0)); - } return castFunction; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java index f78beb130e5ed3..c55ed5957ba20c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java @@ -18,6 +18,7 @@ package org.apache.doris.nereids.rules.analysis; import org.apache.doris.common.Pair; +import org.apache.doris.nereids.analyzer.Scope; import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.properties.OrderKey; import org.apache.doris.nereids.rules.Rule; @@ -52,6 +53,8 @@ * Resolve having clause to the aggregation/repeat. * need Top to Down to traverse plan, * because we need to process FILL_UP_SORT_HAVING_AGGREGATE before FILL_UP_HAVING_AGGREGATE. + * be aware that when filling up the missing slots, we should exclude outer query's correlated slots. + * because these correlated slots belong to outer query, so should not try to find them in child node. */ public class FillUpMissingSlots implements AnalysisRuleFactory { @Override @@ -59,14 +62,18 @@ public List buildRules() { return ImmutableList.of( RuleType.FILL_UP_SORT_PROJECT.build( logicalSort(logicalProject()) - .then(sort -> { + .thenApply(ctx -> { + LogicalSort> sort = ctx.root; + Optional outerScope = ctx.cascadesContext.getOuterScope(); LogicalProject project = sort.child(); Set projectOutputSet = project.getOutputSet(); Set notExistedInProject = sort.getOrderKeys().stream() .map(OrderKey::getExpr) .map(Expression::getInputSlots) .flatMap(Set::stream) - .filter(s -> !projectOutputSet.contains(s)) + .filter(s -> !projectOutputSet.contains(s) + && (!outerScope.isPresent() || !outerScope.get() + .getCorrelatedSlots().contains(s))) .collect(Collectors.toSet()); if (notExistedInProject.isEmpty()) { return null; @@ -82,7 +89,9 @@ public List buildRules() { aggregate(logicalHaving(aggregate())) .when(a -> a.getOutputExpressions().stream().allMatch(SlotReference.class::isInstance)) ).when(this::checkSort) - .then(sort -> processDistinctProjectWithAggregate(sort, sort.child(), sort.child().child().child())) + .thenApply(ctx -> processDistinctProjectWithAggregate(ctx.root, + ctx.root.child(), ctx.root.child().child().child(), + ctx.cascadesContext.getOuterScope())) ), // ATTN: process aggregate with distinct project, must run this rule before FILL_UP_SORT_AGGREGATE // because this pattern will always fail in FILL_UP_SORT_AGGREGATE @@ -91,14 +100,17 @@ public List buildRules() { aggregate(aggregate()) .when(a -> a.getOutputExpressions().stream().allMatch(SlotReference.class::isInstance)) ).when(this::checkSort) - .then(sort -> processDistinctProjectWithAggregate(sort, sort.child(), sort.child().child())) + .thenApply(ctx -> processDistinctProjectWithAggregate(ctx.root, + ctx.root.child(), ctx.root.child().child(), + ctx.cascadesContext.getOuterScope())) ), RuleType.FILL_UP_SORT_AGGREGATE.build( logicalSort(aggregate()) .when(this::checkSort) - .then(sort -> { + .thenApply(ctx -> { + LogicalSort> sort = ctx.root; Aggregate agg = sort.child(); - Resolver resolver = new Resolver(agg); + Resolver resolver = new Resolver(agg, ctx.cascadesContext.getOuterScope()); 
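// The outer scope is passed so correlated slots of the outer query are not treated as missing
// slots to be filled from this aggregate. Hypothetical example:
//   SELECT * FROM t1 WHERE t1.a > (SELECT SUM(t2.a) FROM t2 WHERE t2.b = t1.b ORDER BY t1.c)
// t1.c belongs to the outer query and must not be resolved against the subquery's aggregate.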
sort.getExpressions().forEach(resolver::resolve); return createPlan(resolver, agg, (r, a) -> { List newOrderKeys = sort.getOrderKeys().stream() @@ -118,10 +130,11 @@ public List buildRules() { RuleType.FILL_UP_SORT_HAVING_AGGREGATE.build( logicalSort(logicalHaving(aggregate())) .when(this::checkSort) - .then(sort -> { + .thenApply(ctx -> { + LogicalSort>> sort = ctx.root; LogicalHaving> having = sort.child(); Aggregate agg = having.child(); - Resolver resolver = new Resolver(agg); + Resolver resolver = new Resolver(agg, ctx.cascadesContext.getOuterScope()); sort.getExpressions().forEach(resolver::resolve); return createPlan(resolver, agg, (r, a) -> { List newOrderKeys = sort.getOrderKeys().stream() @@ -138,13 +151,17 @@ public List buildRules() { }) ), RuleType.FILL_UP_SORT_HAVING_PROJECT.build( - logicalSort(logicalHaving(logicalProject())).then(sort -> { + logicalSort(logicalHaving(logicalProject())).thenApply(ctx -> { + LogicalSort>> sort = ctx.root; + Optional outerScope = ctx.cascadesContext.getOuterScope(); Set childOutput = sort.child().getOutputSet(); Set notExistedInProject = sort.getOrderKeys().stream() .map(OrderKey::getExpr) .map(Expression::getInputSlots) .flatMap(Set::stream) - .filter(s -> !childOutput.contains(s)) + .filter(s -> !childOutput.contains(s) + && (!outerScope.isPresent() || !outerScope.get() + .getCorrelatedSlots().contains(s))) .collect(Collectors.toSet()); if (notExistedInProject.isEmpty()) { return null; @@ -158,9 +175,10 @@ public List buildRules() { }) ), RuleType.FILL_UP_HAVING_AGGREGATE.build( - logicalHaving(aggregate()).then(having -> { + logicalHaving(aggregate()).thenApply(ctx -> { + LogicalHaving> having = ctx.root; Aggregate agg = having.child(); - Resolver resolver = new Resolver(agg); + Resolver resolver = new Resolver(agg, ctx.cascadesContext.getOuterScope()); having.getConjuncts().forEach(resolver::resolve); return createPlan(resolver, agg, (r, a) -> { Set newConjuncts = ExpressionUtils.replace( @@ -175,7 +193,9 @@ public List buildRules() { ), // Convert having to filter RuleType.FILL_UP_HAVING_PROJECT.build( - logicalHaving(logicalProject()).then(having -> { + logicalHaving(logicalProject()).thenApply(ctx -> { + LogicalHaving> having = ctx.root; + Optional outerScope = ctx.cascadesContext.getOuterScope(); if (having.getExpressions().stream().anyMatch(e -> e.containsType(AggregateFunction.class))) { // This is very weird pattern. // There are some aggregate functions in having, but its child is project. @@ -198,7 +218,7 @@ public List buildRules() { ImmutableList.of(), ImmutableList.of(), project.child()); // avoid throw exception even if having have slot from its child. // because we will add a project between having and project. 
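// Hypothetical example of the pattern handled here: SELECT t1.a FROM t1 HAVING COUNT(*) > 0;
// the HAVING sits directly on a project, so a scalar aggregate is synthesized over the
// project's child and slot checking is disabled, because a project will be re-inserted
// between the having and the original project afterwards.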
- Resolver resolver = new Resolver(agg, false); + Resolver resolver = new Resolver(agg, false, outerScope); having.getConjuncts().forEach(resolver::resolve); agg = agg.withAggOutput(resolver.getNewOutputSlots()); Set newConjuncts = ExpressionUtils.replace( @@ -212,7 +232,9 @@ public List buildRules() { Set notExistedInProject = having.getExpressions().stream() .map(Expression::getInputSlots) .flatMap(Set::stream) - .filter(s -> !projectOutputSet.contains(s)) + .filter(s -> !projectOutputSet.contains(s) + && (!outerScope.isPresent() || !outerScope.get() + .getCorrelatedSlots().contains(s))) .collect(Collectors.toSet()); if (notExistedInProject.isEmpty()) { return null; @@ -235,18 +257,28 @@ static class Resolver { private final List newOutputSlots = Lists.newArrayList(); private final Map outputSubstitutionMap; private final boolean checkSlot; + private final Optional outerScope; - Resolver(Aggregate aggregate, boolean checkSlot) { + Resolver(Aggregate aggregate, boolean checkSlot, Optional outerScope) { outputExpressions = aggregate.getOutputExpressions(); groupByExpressions = aggregate.getGroupByExpressions(); outputSubstitutionMap = outputExpressions.stream().filter(Alias.class::isInstance) .collect(Collectors.toMap(NamedExpression::toSlot, alias -> alias.child(0), (k1, k2) -> k1)); this.checkSlot = checkSlot; + this.outerScope = outerScope; + } + + Resolver(Aggregate aggregate, boolean checkSlot) { + this(aggregate, checkSlot, Optional.empty()); } Resolver(Aggregate aggregate) { - this(aggregate, true); + this(aggregate, true, Optional.empty()); + } + + Resolver(Aggregate aggregate, Optional outerScope) { + this(aggregate, true, outerScope); } public void resolve(Expression expression) { @@ -274,7 +306,8 @@ public void resolve(Expression expression) { // We couldn't find the equivalent expression in output expressions and group-by expressions, // so we should check whether the expression is valid. if (expression instanceof SlotReference) { - if (checkSlot) { + if (checkSlot && (!outerScope.isPresent() + || !outerScope.get().getCorrelatedSlots().contains(expression))) { throw new AnalysisException(expression.toSql() + " should be grouped by."); } } else if (expression instanceof AggregateFunction) { @@ -401,8 +434,8 @@ private boolean checkSort(LogicalSort logicalSort) { * @return filled up plan */ private Plan processDistinctProjectWithAggregate(LogicalSort sort, - Aggregate upperAggregate, Aggregate bottomAggregate) { - Resolver resolver = new Resolver(bottomAggregate); + Aggregate upperAggregate, Aggregate bottomAggregate, Optional outerScope) { + Resolver resolver = new Resolver(bottomAggregate, outerScope); sort.getExpressions().forEach(resolver::resolve); return createPlan(resolver, bottomAggregate, (r, a) -> { List newOrderKeys = sort.getOrderKeys().stream() diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/NormalizeGenerate.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/NormalizeGenerate.java new file mode 100644 index 00000000000000..200dc04630cec0 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/NormalizeGenerate.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.rules.analysis; + +import org.apache.doris.nereids.rules.Rule; +import org.apache.doris.nereids.rules.RuleType; +import org.apache.doris.nereids.trees.expressions.Alias; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.SubqueryExpr; +import org.apache.doris.nereids.trees.expressions.functions.Function; +import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.util.ExpressionUtils; + +import com.google.common.collect.ImmutableList; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * NormalizeGenerate + */ +public class NormalizeGenerate extends OneAnalysisRuleFactory { + @Override + public Rule build() { + return logicalGenerate() + .when(generate -> generate.getGenerators().stream() + .anyMatch(expr -> expr.containsType(SubqueryExpr.class))) + .then(generate -> { + List subqueries = ExpressionUtils.collectToList( + generate.getExpressions(), SubqueryExpr.class::isInstance); + Map replaceMap = new HashMap<>(); + ImmutableList.Builder builder = ImmutableList.builder(); + for (Expression expr : subqueries) { + Alias alias = new Alias(expr); + builder.add(alias); + replaceMap.put(expr, alias.toSlot()); + } + LogicalProject logicalProject = new LogicalProject(builder.build(), generate.child()); + List newGenerators = new ArrayList<>(generate.getGenerators().size()); + for (Function function : generate.getGenerators()) { + newGenerators.add((Function) ExpressionUtils.replace(function, replaceMap)); + } + return generate.withGenerators(newGenerators).withChildren(ImmutableList.of(logicalProject)); + }) + .toRule(RuleType.NORMALIZE_GENERATE); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubExprAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubExprAnalyzer.java index 7bfd5256f6af19..7b0ed45708251d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubExprAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubExprAnalyzer.java @@ -32,21 +32,29 @@ import org.apache.doris.nereids.trees.expressions.literal.BooleanLiteral; import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionRewriter; import org.apache.doris.nereids.trees.plans.Plan; +import org.apache.doris.nereids.trees.plans.PlanType; import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; +import org.apache.doris.nereids.trees.plans.logical.LogicalJoin; import org.apache.doris.nereids.trees.plans.logical.LogicalLimit; import org.apache.doris.nereids.trees.plans.logical.LogicalOneRowRelation; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.trees.plans.logical.LogicalSort; +import 
org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; +import org.apache.doris.nereids.util.ExpressionUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.Set; /** * Use the visitor to iterate sub expression. @@ -114,22 +122,52 @@ public Expression visitInSubquery(InSubquery expr, T context) { @Override public Expression visitScalarSubquery(ScalarSubquery scalar, T context) { AnalyzedResult analyzedResult = analyzeSubquery(scalar); + boolean isCorrelated = analyzedResult.isCorrelated(); + LogicalPlan analyzedSubqueryPlan = analyzedResult.logicalPlan; + checkOutputColumn(analyzedSubqueryPlan); + if (isCorrelated) { + if (analyzedSubqueryPlan instanceof LogicalLimit) { + LogicalLimit limit = (LogicalLimit) analyzedSubqueryPlan; + if (limit.getOffset() == 0 && limit.getLimit() == 1) { + // skip useless limit node + analyzedResult = new AnalyzedResult((LogicalPlan) analyzedSubqueryPlan.child(0), + analyzedResult.correlatedSlots); + } else { + throw new AnalysisException("limit is not supported in correlated subquery " + + analyzedResult.getLogicalPlan()); + } + } + if (analyzedSubqueryPlan instanceof LogicalSort) { + // skip useless sort node + analyzedResult = new AnalyzedResult((LogicalPlan) analyzedSubqueryPlan.child(0), + analyzedResult.correlatedSlots); + } + CorrelatedSlotsValidator validator = + new CorrelatedSlotsValidator(ImmutableSet.copyOf(analyzedResult.correlatedSlots)); + List nodeInfoList = new ArrayList<>(16); + Set topAgg = new HashSet<>(); + validateSubquery(analyzedResult.logicalPlan, validator, nodeInfoList, topAgg); + } - checkOutputColumn(analyzedResult.getLogicalPlan()); - checkHasAgg(analyzedResult); - checkHasNoGroupBy(analyzedResult); - - // if scalar subquery is like select '2024-02-02 00:00:00' - // we can just return the constant expr '2024-02-02 00:00:00' if (analyzedResult.getLogicalPlan() instanceof LogicalProject) { LogicalProject project = (LogicalProject) analyzedResult.getLogicalPlan(); if (project.child() instanceof LogicalOneRowRelation && project.getProjects().size() == 1 && project.getProjects().get(0) instanceof Alias) { + // if scalar subquery is like select '2024-02-02 00:00:00' + // we can just return the constant expr '2024-02-02 00:00:00' Alias alias = (Alias) project.getProjects().get(0); if (alias.isConstant()) { return alias.child(); } + } else if (isCorrelated) { + Set correlatedSlots = new HashSet<>(analyzedResult.getCorrelatedSlots()); + if (!Sets.intersection(ExpressionUtils.getInputSlotSet(project.getProjects()), + correlatedSlots).isEmpty()) { + throw new AnalysisException( + "outer query's column is not supported in subquery's output " + + analyzedResult.getLogicalPlan()); + } } } @@ -143,27 +181,6 @@ private void checkOutputColumn(LogicalPlan plan) { } } - private void checkHasAgg(AnalyzedResult analyzedResult) { - if (!analyzedResult.isCorrelated()) { - return; - } - if (!analyzedResult.hasAgg()) { - throw new AnalysisException("The select item in correlated subquery of binary predicate " - + "should only be sum, min, max, avg and count. 
Current subquery: " - + analyzedResult.getLogicalPlan()); - } - } - - private void checkHasNoGroupBy(AnalyzedResult analyzedResult) { - if (!analyzedResult.isCorrelated()) { - return; - } - if (analyzedResult.hasGroupBy()) { - throw new AnalysisException("Unsupported correlated subquery with grouping and/or aggregation " - + analyzedResult.getLogicalPlan()); - } - } - private void checkNoCorrelatedSlotsUnderAgg(AnalyzedResult analyzedResult) { if (analyzedResult.hasCorrelatedSlotsUnderAgg()) { throw new AnalysisException( @@ -230,30 +247,19 @@ public boolean isCorrelated() { return !correlatedSlots.isEmpty(); } - public boolean hasAgg() { - return logicalPlan.anyMatch(LogicalAggregate.class::isInstance); - } - - public boolean hasGroupBy() { - if (hasAgg()) { - return !((LogicalAggregate) - ((ImmutableSet) logicalPlan.collect(LogicalAggregate.class::isInstance)).asList().get(0)) - .getGroupByExpressions().isEmpty(); - } - return false; - } - public boolean hasCorrelatedSlotsUnderAgg() { return correlatedSlots.isEmpty() ? false - : findAggContainsCorrelatedSlots(logicalPlan, ImmutableSet.copyOf(correlatedSlots)); + : hasCorrelatedSlotsUnderNode(logicalPlan, + ImmutableSet.copyOf(correlatedSlots), LogicalAggregate.class); } - private boolean findAggContainsCorrelatedSlots(Plan rootPlan, ImmutableSet slots) { + private static boolean hasCorrelatedSlotsUnderNode(Plan rootPlan, + ImmutableSet slots, Class clazz) { ArrayDeque planQueue = new ArrayDeque<>(); planQueue.add(rootPlan); while (!planQueue.isEmpty()) { Plan plan = planQueue.poll(); - if (plan instanceof LogicalAggregate) { + if (plan.getClass().equals(clazz)) { if (plan.containsSlots(slots)) { return true; } @@ -278,4 +284,171 @@ public boolean rootIsLimitZero() { return logicalPlan instanceof LogicalLimit && ((LogicalLimit) logicalPlan).getLimit() == 0; } } + + private static class PlanNodeCorrelatedInfo { + private PlanType planType; + private boolean containCorrelatedSlots; + private boolean hasGroupBy; + private LogicalAggregate aggregate; + + public PlanNodeCorrelatedInfo(PlanType planType, boolean containCorrelatedSlots) { + this(planType, containCorrelatedSlots, null); + } + + public PlanNodeCorrelatedInfo(PlanType planType, boolean containCorrelatedSlots, + LogicalAggregate aggregate) { + this.planType = planType; + this.containCorrelatedSlots = containCorrelatedSlots; + this.aggregate = aggregate; + this.hasGroupBy = aggregate != null ? !aggregate.getGroupByExpressions().isEmpty() : false; + } + } + + private static class CorrelatedSlotsValidator + extends PlanVisitor { + private final ImmutableSet correlatedSlots; + + public CorrelatedSlotsValidator(ImmutableSet correlatedSlots) { + this.correlatedSlots = correlatedSlots; + } + + @Override + public PlanNodeCorrelatedInfo visit(Plan plan, Void context) { + return new PlanNodeCorrelatedInfo(plan.getType(), findCorrelatedSlots(plan)); + } + + public PlanNodeCorrelatedInfo visitLogicalProject(LogicalProject plan, Void context) { + boolean containCorrelatedSlots = findCorrelatedSlots(plan); + if (containCorrelatedSlots) { + throw new AnalysisException( + String.format("access outer query's column in project is not supported", + correlatedSlots)); + } else { + PlanType planType = ExpressionUtils.containsWindowExpression( + ((LogicalProject) plan).getProjects()) ? 
PlanType.LOGICAL_WINDOW : plan.getType(); + return new PlanNodeCorrelatedInfo(planType, false); + } + } + + public PlanNodeCorrelatedInfo visitLogicalAggregate(LogicalAggregate plan, Void context) { + boolean containCorrelatedSlots = findCorrelatedSlots(plan); + if (containCorrelatedSlots) { + throw new AnalysisException( + String.format("access outer query's column in aggregate is not supported", + correlatedSlots, plan)); + } else { + return new PlanNodeCorrelatedInfo(plan.getType(), false, plan); + } + } + + public PlanNodeCorrelatedInfo visitLogicalJoin(LogicalJoin plan, Void context) { + boolean containCorrelatedSlots = findCorrelatedSlots(plan); + if (containCorrelatedSlots) { + throw new AnalysisException( + String.format("access outer query's column in join is not supported", + correlatedSlots, plan)); + } else { + return new PlanNodeCorrelatedInfo(plan.getType(), false); + } + } + + public PlanNodeCorrelatedInfo visitLogicalSort(LogicalSort plan, Void context) { + boolean containCorrelatedSlots = findCorrelatedSlots(plan); + if (containCorrelatedSlots) { + throw new AnalysisException( + String.format("access outer query's column in order by is not supported", + correlatedSlots, plan)); + } else { + return new PlanNodeCorrelatedInfo(plan.getType(), false); + } + } + + private boolean findCorrelatedSlots(Plan plan) { + return plan.getExpressions().stream().anyMatch(expression -> !Sets + .intersection(correlatedSlots, expression.getInputSlots()).isEmpty()); + } + } + + private LogicalAggregate validateNodeInfoList(List nodeInfoList) { + LogicalAggregate topAggregate = null; + int size = nodeInfoList.size(); + if (size > 0) { + List correlatedNodes = new ArrayList<>(4); + boolean checkNodeTypeAfterCorrelatedNode = false; + boolean checkAfterAggNode = false; + for (int i = size - 1; i >= 0; --i) { + PlanNodeCorrelatedInfo nodeInfo = nodeInfoList.get(i); + if (checkNodeTypeAfterCorrelatedNode) { + switch (nodeInfo.planType) { + case LOGICAL_LIMIT: + throw new AnalysisException( + "limit is not supported in correlated subquery"); + case LOGICAL_GENERATE: + throw new AnalysisException( + "access outer query's column before lateral view is not supported"); + case LOGICAL_AGGREGATE: + if (checkAfterAggNode) { + throw new AnalysisException( + "access outer query's column before two agg nodes is not supported"); + } + if (nodeInfo.hasGroupBy) { + // TODO support later + throw new AnalysisException( + "access outer query's column before agg with group by is not supported"); + } + checkAfterAggNode = true; + topAggregate = nodeInfo.aggregate; + break; + case LOGICAL_WINDOW: + throw new AnalysisException( + "access outer query's column before window function is not supported"); + case LOGICAL_JOIN: + throw new AnalysisException( + "access outer query's column before join is not supported"); + case LOGICAL_SORT: + // allow any sort node, the sort node will be removed by ELIMINATE_ORDER_BY_UNDER_SUBQUERY + break; + case LOGICAL_PROJECT: + // allow any project node + break; + case LOGICAL_SUBQUERY_ALIAS: + // allow any subquery alias + break; + default: + if (checkAfterAggNode) { + throw new AnalysisException( + "only project, sort and subquery alias node is allowed after agg node"); + } + break; + } + } + if (nodeInfo.containCorrelatedSlots) { + correlatedNodes.add(nodeInfo); + checkNodeTypeAfterCorrelatedNode = true; + } + } + + // only support 1 correlated node for now + if (correlatedNodes.size() > 1) { + throw new AnalysisException( + "access outer query's column in two places is not 
supported"); + } + } + return topAggregate; + } + + private void validateSubquery(Plan plan, CorrelatedSlotsValidator validator, + List nodeInfoList, Set topAgg) { + nodeInfoList.add(plan.accept(validator, null)); + for (Plan child : plan.children()) { + validateSubquery(child, validator, nodeInfoList, topAgg); + } + if (plan.children().isEmpty()) { + LogicalAggregate topAggNode = validateNodeInfoList(nodeInfoList); + if (topAggNode != null) { + topAgg.add(topAggNode); + } + } + nodeInfoList.remove(nodeInfoList.size() - 1); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubqueryToApply.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubqueryToApply.java index cfc5b2ba24a11b..17e7d098cad552 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubqueryToApply.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/SubqueryToApply.java @@ -17,6 +17,7 @@ package org.apache.doris.nereids.rules.analysis; +import org.apache.doris.common.Pair; import org.apache.doris.nereids.CascadesContext; import org.apache.doris.nereids.StatementContext; import org.apache.doris.nereids.rules.Rule; @@ -30,6 +31,8 @@ import org.apache.doris.nereids.trees.expressions.Exists; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.InSubquery; +import org.apache.doris.nereids.trees.expressions.IsNull; +import org.apache.doris.nereids.trees.expressions.LessThanEqual; import org.apache.doris.nereids.trees.expressions.ListQuery; import org.apache.doris.nereids.trees.expressions.MarkJoinSlotReference; import org.apache.doris.nereids.trees.expressions.NamedExpression; @@ -39,8 +42,14 @@ import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.SlotReference; import org.apache.doris.nereids.trees.expressions.SubqueryExpr; +import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; +import org.apache.doris.nereids.trees.expressions.functions.agg.AnyValue; +import org.apache.doris.nereids.trees.expressions.functions.agg.Count; +import org.apache.doris.nereids.trees.expressions.functions.scalar.AssertTrue; import org.apache.doris.nereids.trees.expressions.functions.scalar.Nvl; import org.apache.doris.nereids.trees.expressions.literal.BooleanLiteral; +import org.apache.doris.nereids.trees.expressions.literal.IntegerLiteral; +import org.apache.doris.nereids.trees.expressions.literal.VarcharLiteral; import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionRewriter; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; @@ -54,10 +63,12 @@ import org.apache.doris.nereids.util.ExpressionUtils; import org.apache.doris.nereids.util.Utils; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import java.util.Collection; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -124,14 +135,14 @@ public List buildRules() { TrySimplifyPredicateWithMarkJoinSlot.INSTANCE.rewrite(conjunct, rewriteContext), rewriteContext) : false; - - applyPlan = subqueryToApply(subqueryExprs.stream() + Pair> result = subqueryToApply(subqueryExprs.stream() .collect(ImmutableList.toImmutableList()), tmpPlan, context.getSubqueryToMarkJoinSlot(), ctx.cascadesContext, Optional.of(conjunct), false, isMarkSlotNotNull); 
+ applyPlan = result.first; tmpPlan = applyPlan; - newConjuncts.add(conjunct); + newConjuncts.add(result.second.isPresent() ? result.second.get() : conjunct); } Plan newFilter = new LogicalFilter<>(newConjuncts.build(), applyPlan); return new LogicalProject<>(filter.getOutput().stream().collect(ImmutableList.toImmutableList()), @@ -167,13 +178,15 @@ public List buildRules() { Expression newProject = replaceSubquery.replace(oldProjects.get(i), context); - applyPlan = subqueryToApply( - Utils.fastToImmutableList(subqueryExprs), - childPlan, context.getSubqueryToMarkJoinSlot(), - ctx.cascadesContext, - Optional.of(newProject), true, false); + Pair> result = + subqueryToApply(Utils.fastToImmutableList(subqueryExprs), childPlan, + context.getSubqueryToMarkJoinSlot(), ctx.cascadesContext, + Optional.of(newProject), true, false); + applyPlan = result.first; childPlan = applyPlan; - newProjects.add((NamedExpression) newProject); + newProjects.add( + result.second.isPresent() ? (NamedExpression) result.second.get() + : (NamedExpression) newProject); } return project.withProjectsAndChild(newProjects.build(), childPlan); @@ -248,17 +261,18 @@ public List buildRules() { TrySimplifyPredicateWithMarkJoinSlot.INSTANCE.rewrite(conjunct, rewriteContext), rewriteContext) : false; - applyPlan = subqueryToApply( + Pair> result = subqueryToApply( subqueryExprs.stream().collect(ImmutableList.toImmutableList()), relatedInfoList.get(i) == RelatedInfo.RelatedToLeft ? leftChildPlan : rightChildPlan, context.getSubqueryToMarkJoinSlot(), ctx.cascadesContext, Optional.of(conjunct), false, isMarkSlotNotNull); + applyPlan = result.first; if (relatedInfoList.get(i) == RelatedInfo.RelatedToLeft) { leftChildPlan = applyPlan; } else { rightChildPlan = applyPlan; } - newConjuncts.add(conjunct); + newConjuncts.add(result.second.isPresent() ? 
result.second.get() : conjunct); } List simpleConjuncts = joinConjuncts.get(false); if (simpleConjuncts != null) { @@ -350,12 +364,12 @@ private ImmutableList collectRelatedInfo(List subqueryC return correlatedInfoList.build(); } - private LogicalPlan subqueryToApply(List subqueryExprs, LogicalPlan childPlan, - Map> subqueryToMarkJoinSlot, - CascadesContext ctx, - Optional conjunct, boolean isProject, - boolean isMarkJoinSlotNotNull) { - LogicalPlan tmpPlan = childPlan; + private Pair> subqueryToApply( + List subqueryExprs, LogicalPlan childPlan, + Map> subqueryToMarkJoinSlot, + CascadesContext ctx, Optional conjunct, boolean isProject, + boolean isMarkJoinSlotNotNull) { + Pair> tmpPlan = Pair.of(childPlan, conjunct); for (int i = 0; i < subqueryExprs.size(); ++i) { SubqueryExpr subqueryExpr = subqueryExprs.get(i); if (subqueryExpr instanceof Exists && hasTopLevelScalarAgg(subqueryExpr.getQueryPlan())) { @@ -366,7 +380,7 @@ private LogicalPlan subqueryToApply(List subqueryExprs, LogicalPla } if (!ctx.subqueryIsAnalyzed(subqueryExpr)) { - tmpPlan = addApply(subqueryExpr, tmpPlan, + tmpPlan = addApply(subqueryExpr, tmpPlan.first, subqueryToMarkJoinSlot, ctx, conjunct, isProject, subqueryExprs.size() == 1, isMarkJoinSlotNotNull); } @@ -383,32 +397,108 @@ private static boolean hasTopLevelScalarAgg(Plan plan) { return false; } - private LogicalPlan addApply(SubqueryExpr subquery, LogicalPlan childPlan, - Map> subqueryToMarkJoinSlot, - CascadesContext ctx, Optional conjunct, - boolean isProject, boolean singleSubquery, boolean isMarkJoinSlotNotNull) { + private Pair> addApply(SubqueryExpr subquery, + LogicalPlan childPlan, + Map> subqueryToMarkJoinSlot, + CascadesContext ctx, Optional conjunct, boolean isProject, + boolean singleSubquery, boolean isMarkJoinSlotNotNull) { ctx.setSubqueryExprIsAnalyzed(subquery, true); + Optional markJoinSlot = subqueryToMarkJoinSlot.get(subquery); boolean needAddScalarSubqueryOutputToProjects = isConjunctContainsScalarSubqueryOutput( subquery, conjunct, isProject, singleSubquery); + boolean needRuntimeAssertCount = false; + NamedExpression oldSubqueryOutput = subquery.getQueryPlan().getOutput().get(0); + Slot countSlot = null; + Slot anyValueSlot = null; + Optional newConjunct = conjunct; + if (needAddScalarSubqueryOutputToProjects && subquery instanceof ScalarSubquery + && !subquery.getCorrelateSlots().isEmpty()) { + if (((ScalarSubquery) subquery).hasTopLevelScalarAgg()) { + // consider sql: SELECT * FROM t1 WHERE t1.a <= (SELECT COUNT(t2.a) FROM t2 WHERE (t1.b = t2.b)); + // when unnest correlated subquery, we create a left join node. + // outer query is left table and subquery is right one + // if there is no match, the row from right table is filled with nulls + // but COUNT function is always not nullable. 
+ // so wrap COUNT with Nvl to ensure its result is 0 instead of null to get the correct result + if (conjunct.isPresent()) { + Map replaceMap = new HashMap<>(); + NamedExpression agg = ((ScalarSubquery) subquery).getTopLevelScalarAggFunction().get(); + if (agg instanceof Alias) { + if (((Alias) agg).child() instanceof AlwaysNotNullable) { + AlwaysNotNullable notNullableAggFunc = + (AlwaysNotNullable) ((Alias) agg).child(); + if (subquery.getQueryPlan() instanceof LogicalProject) { + LogicalProject logicalProject = + (LogicalProject) subquery.getQueryPlan(); + Preconditions.checkState(logicalProject.getOutputs().size() == 1, + "Scalar subuqery's should only output 1 column"); + Slot aggSlot = agg.toSlot(); + replaceMap.put(aggSlot, new Alias(new Nvl(aggSlot, + notNullableAggFunc.resultForEmptyInput()))); + NamedExpression newOutput = (NamedExpression) ExpressionUtils + .replace((NamedExpression) logicalProject.getProjects().get(0), replaceMap); + replaceMap.clear(); + replaceMap.put(oldSubqueryOutput, newOutput.toSlot()); + oldSubqueryOutput = newOutput; + subquery = subquery.withSubquery((LogicalPlan) logicalProject.child()); + } else { + replaceMap.put(oldSubqueryOutput, new Nvl(oldSubqueryOutput, + notNullableAggFunc.resultForEmptyInput())); + } + } + if (!replaceMap.isEmpty()) { + newConjunct = Optional.of(ExpressionUtils.replace(conjunct.get(), replaceMap)); + } + } + } + } else { + // if scalar subquery doesn't have top level scalar agg we will create one, for example + // select (select t2.c1 from t2 where t2.c2 = t1.c2) from t1; + // the original output of the correlate subquery is t2.c1, after adding a scalar agg, it will be + // select (select count(*), any_value(t2.c1) from t2 where t2.c2 = t1.c2) from t1; + Alias countAlias = new Alias(new Count()); + Alias anyValueAlias = new Alias(new AnyValue(oldSubqueryOutput)); + LogicalAggregate aggregate = new LogicalAggregate<>(ImmutableList.of(), + ImmutableList.of(countAlias, anyValueAlias), subquery.getQueryPlan()); + countSlot = countAlias.toSlot(); + anyValueSlot = anyValueAlias.toSlot(); + subquery = subquery.withSubquery(aggregate); + if (conjunct.isPresent()) { + Map replaceMap = new HashMap<>(); + replaceMap.put(oldSubqueryOutput, anyValueSlot); + newConjunct = Optional.of(ExpressionUtils.replace(conjunct.get(), replaceMap)); + } + needRuntimeAssertCount = true; + } + } LogicalApply newApply = new LogicalApply( subquery.getCorrelateSlots(), subquery, Optional.empty(), - subqueryToMarkJoinSlot.get(subquery), + markJoinSlot, needAddScalarSubqueryOutputToProjects, isProject, isMarkJoinSlotNotNull, childPlan, subquery.getQueryPlan()); - List projects = ImmutableList.builder() - // left child - .addAll(childPlan.getOutput()) - // markJoinSlotReference - .addAll(subqueryToMarkJoinSlot.get(subquery).isPresent() - ? ImmutableList.of(subqueryToMarkJoinSlot.get(subquery).get()) : ImmutableList.of()) - // scalarSubquery output - .addAll(needAddScalarSubqueryOutputToProjects - ? 
ImmutableList.of(subquery.getQueryPlan().getOutput().get(0)) : ImmutableList.of()) - .build(); - - return new LogicalProject(projects, newApply); + ImmutableList.Builder projects = + ImmutableList.builderWithExpectedSize(childPlan.getOutput().size() + 3); + // left child + projects.addAll(childPlan.getOutput()); + // markJoinSlotReference + markJoinSlot.map(projects::add); + if (needAddScalarSubqueryOutputToProjects) { + if (needRuntimeAssertCount) { + // if we create a new subquery in previous step, we need add the any_value() and assert_true() + // into the project list. So BE will use assert_true to check if the subquery return only 1 row + projects.add(anyValueSlot); + projects.add(new Alias(new AssertTrue( + ExpressionUtils.or(new IsNull(countSlot), + new LessThanEqual(countSlot, new IntegerLiteral(1))), + new VarcharLiteral("correlate scalar subquery must return only 1 row")))); + } else { + projects.add(oldSubqueryOutput); + } + } + + return Pair.of(new LogicalProject(projects.build(), newApply), newConjunct); } private boolean isConjunctContainsScalarSubqueryOutput( diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/UserAuthentication.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/UserAuthentication.java index df94a051afe54b..2eca9a45d3f7a4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/UserAuthentication.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/UserAuthentication.java @@ -17,7 +17,6 @@ package org.apache.doris.nereids.rules.analysis; -import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.DatabaseIf; import org.apache.doris.catalog.TableIf; import org.apache.doris.common.ErrorCode; @@ -59,14 +58,13 @@ public static void checkPermission(TableIf table, ConnectContext connectContext, } String ctlName = catalog.getName(); AccessControllerManager accessManager = connectContext.getEnv().getAccessManager(); - UserIdentity userIdentity = connectContext.getCurrentUserIdentity(); if (CollectionUtils.isEmpty(columns)) { - if (!accessManager.checkTblPriv(userIdentity, ctlName, dbName, tableName, PrivPredicate.SELECT)) { + if (!accessManager.checkTblPriv(connectContext, ctlName, dbName, tableName, PrivPredicate.SELECT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLE_ACCESS_DENIED_ERROR, PrivPredicate.SELECT.getPrivs().toString(), tableName); } } else { - accessManager.checkColumnsPriv(userIdentity, ctlName, dbName, tableName, columns, PrivPredicate.SELECT); + accessManager.checkColumnsPriv(connectContext, ctlName, dbName, tableName, columns, PrivPredicate.SELECT); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewAggregateRule.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewAggregateRule.java index 909f67de204255..1a66eda2ad3eed 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewAggregateRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewAggregateRule.java @@ -17,7 +17,10 @@ package org.apache.doris.nereids.rules.exploration.mv; +import org.apache.doris.catalog.Column; +import org.apache.doris.catalog.MTMV; import org.apache.doris.common.Pair; +import org.apache.doris.mtmv.BaseTableInfo; import org.apache.doris.nereids.CascadesContext; import org.apache.doris.nereids.jobs.executor.Rewriter; import 
org.apache.doris.nereids.properties.DataTrait; @@ -38,6 +41,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; import org.apache.doris.nereids.trees.expressions.Slot; +import org.apache.doris.nereids.trees.expressions.SlotReference; import org.apache.doris.nereids.trees.expressions.VirtualSlotReference; import org.apache.doris.nereids.trees.expressions.functions.Function; import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction; @@ -63,6 +67,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.function.Supplier; @@ -324,6 +329,51 @@ protected Expression tryRewriteExpression(StructInfo queryStructInfo, Expression return rewrittenExpression; } + /** + * Not all query after rewritten successfully can compensate union all + * Such as: + * mv def sql is as following, partition column is a + * select a, b, count(*) from t1 group by a, b + * Query is as following: + * select b, count(*) from t1 group by b, after rewritten by materialized view successfully + * If mv part partition is invalid, can not compensate union all, because result is wrong after + * compensate union all. + */ + @Override + protected boolean canUnionRewrite(Plan queryPlan, MTMV mtmv, CascadesContext cascadesContext) { + // Check query plan is contain the partition column + // Query plan in the current rule must contain aggregate node, because the rule pattern is + // + Optional> logicalAggregateOptional = + queryPlan.collectFirst(planTreeNode -> planTreeNode instanceof LogicalAggregate); + if (!logicalAggregateOptional.isPresent()) { + return true; + } + List groupByExpressions = logicalAggregateOptional.get().getGroupByExpressions(); + if (groupByExpressions.isEmpty()) { + // Scalar aggregate can not compensate union all + return false; + } + final String relatedCol = mtmv.getMvPartitionInfo().getRelatedCol(); + final BaseTableInfo relatedTableInfo = mtmv.getMvPartitionInfo().getRelatedTableInfo(); + boolean canUnionRewrite = false; + // Check the query plan group by expression contains partition col or not + List groupByShuttledExpressions = + ExpressionUtils.shuttleExpressionWithLineage(groupByExpressions, queryPlan, new BitSet()); + for (Expression expression : groupByShuttledExpressions) { + canUnionRewrite = !expression.collectToSet(expr -> expr instanceof SlotReference + && ((SlotReference) expr).isColumnFromTable() + && Objects.equals(((SlotReference) expr).getColumn().map(Column::getName).orElse(null), + relatedCol) + && Objects.equals(((SlotReference) expr).getTable().map(BaseTableInfo::new).orElse(null), + relatedTableInfo)).isEmpty(); + if (canUnionRewrite) { + break; + } + } + return canUnionRewrite; + } + /** * Check query and view aggregate compatibility */ diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java index 9fef549c0a7f5d..7d84b8ab36b59c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java @@ -292,6 +292,17 @@ protected List doRewrite(StructInfo queryStructInfo, CascadesContext casca return rewriteResults; } boolean partitionNeedUnion = 
needUnionRewrite(invalidPartitions, cascadesContext); + boolean canUnionRewrite = canUnionRewrite(queryPlan, + ((AsyncMaterializationContext) materializationContext).getMtmv(), + cascadesContext); + if (partitionNeedUnion && !canUnionRewrite) { + materializationContext.recordFailReason(queryStructInfo, + "need compensate union all, but can not, because the query structInfo", + () -> String.format("mv partition info is %s, and the query plan is %s", + ((AsyncMaterializationContext) materializationContext).getMtmv() + .getMvPartitionInfo(), queryPlan.treeString())); + return rewriteResults; + } final Pair>, Map>> finalInvalidPartitions = invalidPartitions; if (partitionNeedUnion) { @@ -377,6 +388,20 @@ protected boolean needUnionRewrite( && (!invalidPartitions.key().isEmpty() || !invalidPartitions.value().isEmpty()); } + /** + * Not all query after rewritten successfully can compensate union all + * Such as: + * mv def sql is as following, partition column is a + * select a, b, count(*) from t1 group by a, b + * Query is as following: + * select b, count(*) from t1 group by b, after rewritten by materialized view successfully + * If mv part partition is invalid, can not compensate union all, because result is wrong after + * compensate union all. + */ + protected boolean canUnionRewrite(Plan queryPlan, MTMV mtmv, CascadesContext cascadesContext) { + return true; + } + // Normalize expression such as nullable property and output slot id protected Plan normalizeExpressions(Plan rewrittenPlan, Plan originPlan) { if (rewrittenPlan.getOutput().size() != originPlan.getOutput().size()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java index 254297842b521e..6af72b1e81db3f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java @@ -247,7 +247,7 @@ public static Plan generateMvScanPlan(OlapTable table, long indexId, /** * Optimize by rules, this support optimize by custom rules by define different rewriter according to different - * rules + * rules, this method is only for materialized view rewrite */ public static Plan rewriteByRules( CascadesContext cascadesContext, @@ -267,7 +267,12 @@ public static Plan rewriteByRules( CascadesContext rewrittenPlanContext = CascadesContext.initContext( cascadesContext.getStatementContext(), rewrittenPlan, cascadesContext.getCurrentJobContext().getRequiredProperties()); - rewrittenPlan = planRewriter.apply(rewrittenPlanContext); + try { + rewrittenPlanContext.getConnectContext().setSkipAuth(true); + rewrittenPlan = planRewriter.apply(rewrittenPlanContext); + } finally { + rewrittenPlanContext.getConnectContext().setSkipAuth(false); + } Map exprIdToNewRewrittenSlot = Maps.newLinkedHashMap(); for (Slot slot : rewrittenPlan.getOutput()) { exprIdToNewRewrittenSlot.put(slot.getExprId(), slot); @@ -429,6 +434,20 @@ public Void visitLogicalRelation(LogicalRelation relation, IncrementCheckerConte + "but now is %s", relation.getClass().getSimpleName())); return null; } + SlotReference contextPartitionColumn = getContextPartitionColumn(context); + if (contextPartitionColumn == null) { + context.addFailReason(String.format("mv partition column is not from table when relation check, " + + "mv partition column is %s", context.getMvPartitionColumn())); + return 
null; + } + // Check the table which mv partition column belonged to is same as the current check relation or not + if (!((LogicalCatalogRelation) relation).getTable().getFullQualifiers().equals( + contextPartitionColumn.getTable().map(TableIf::getFullQualifiers).orElse(ImmutableList.of()))) { + context.addFailReason(String.format("mv partition column name is not belonged to current check , " + + "table, current table is %s", + ((LogicalCatalogRelation) relation).getTable().getFullQualifiers())); + return null; + } LogicalCatalogRelation logicalCatalogRelation = (LogicalCatalogRelation) relation; TableIf table = logicalCatalogRelation.getTable(); // if self join, self join can not partition track now, remove the partition column correspondingly @@ -457,10 +476,6 @@ public Void visitLogicalRelation(LogicalRelation relation, IncrementCheckerConte return null; } Set partitionColumnSet = new HashSet<>(relatedTable.getPartitionColumns()); - SlotReference contextPartitionColumn = getContextPartitionColumn(context); - if (contextPartitionColumn == null) { - return null; - } Column mvReferenceColumn = contextPartitionColumn.getColumn().get(); Expr definExpr = mvReferenceColumn.getDefineExpr(); if (definExpr instanceof SlotRef) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java index d133eac32baddb..810d281ac579ac 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnBE.java @@ -176,7 +176,16 @@ private static Expression replace( if (newChild != child) { hasNewChildren = true; } - newChildren.add(newChild); + if (!newChild.getDataType().equals(child.getDataType())) { + try { + newChildren.add(newChild.castTo(child.getDataType())); + } catch (Exception e) { + LOG.warn("expression of type {} cast to {} failed. ", newChild.getDataType(), child.getDataType()); + newChildren.add(newChild); + } + } else { + newChildren.add(newChild); + } } return hasNewChildren ? 
root.withChildren(newChildren) : root; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java index 400577f5a6a943..b29694d5440e6c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java @@ -20,6 +20,7 @@ import org.apache.doris.catalog.EncryptKey; import org.apache.doris.catalog.Env; import org.apache.doris.cluster.ClusterNamespace; +import org.apache.doris.common.util.DebugUtil; import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.rules.expression.AbstractExpressionRewriteRule; import org.apache.doris.nereids.rules.expression.ExpressionListenerMatcher; @@ -62,6 +63,8 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.Date; import org.apache.doris.nereids.trees.expressions.functions.scalar.EncryptKeyRef; import org.apache.doris.nereids.trees.expressions.functions.scalar.If; +import org.apache.doris.nereids.trees.expressions.functions.scalar.LastQueryId; +import org.apache.doris.nereids.trees.expressions.functions.scalar.Nvl; import org.apache.doris.nereids.trees.expressions.functions.scalar.Password; import org.apache.doris.nereids.trees.expressions.functions.scalar.SessionUser; import org.apache.doris.nereids.trees.expressions.functions.scalar.User; @@ -84,6 +87,7 @@ import org.apache.doris.nereids.util.ExpressionUtils; import org.apache.doris.qe.ConnectContext; import org.apache.doris.qe.GlobalVariable; +import org.apache.doris.thrift.TUniqueId; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -167,7 +171,9 @@ public List> buildRules() { matches(Array.class, this::visitArray), matches(Date.class, this::visitDate), matches(Version.class, this::visitVersion), - matches(SessionUser.class, this::visitSessionUser) + matches(SessionUser.class, this::visitSessionUser), + matches(LastQueryId.class, this::visitLastQueryId), + matches(Nvl.class, this::visitNvl) ); } @@ -335,6 +341,16 @@ public Expression visitSessionUser(SessionUser user, ExpressionRewriteContext co return new VarcharLiteral(res); } + @Override + public Expression visitLastQueryId(LastQueryId queryId, ExpressionRewriteContext context) { + String res = "Not Available"; + TUniqueId id = context.cascadesContext.getConnectContext().getLastQueryId(); + if (id != null) { + res = DebugUtil.printId(id); + } + return new VarcharLiteral(res); + } + @Override public Expression visitConnectionId(ConnectionId connectionId, ExpressionRewriteContext context) { return new BigIntLiteral(context.cascadesContext.getConnectContext().getConnectionId()); @@ -630,6 +646,21 @@ public Expression visitVersion(Version version, ExpressionRewriteContext context return new StringLiteral(GlobalVariable.version); } + @Override + public Expression visitNvl(Nvl nvl, ExpressionRewriteContext context) { + for (Expression expr : nvl.children()) { + if (expr.isLiteral()) { + if (!expr.isNullLiteral()) { + return expr; + } + } else { + return nvl; + } + } + // all nulls + return nvl.child(0); + } + private E rewriteChildren(E expr, ExpressionRewriteContext context) { if (!deepRewrite) { return expr; diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java index aaea828f071acd..80274331dea4b9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/implementation/AggregateStrategies.java @@ -117,7 +117,8 @@ public List buildRules() { .when(agg -> { Set funcs = agg.getAggregateFunctions(); return !funcs.isEmpty() && funcs.stream() - .allMatch(f -> f instanceof Count && !f.isDistinct()); + .allMatch(f -> f instanceof Count && !f.isDistinct() && (((Count) f).isCountStar() + || f.child(0) instanceof Slot)); }) .thenApply(ctx -> { LogicalAggregate> agg = ctx.root; @@ -136,7 +137,8 @@ public List buildRules() { .when(agg -> agg.getGroupByExpressions().isEmpty()) .when(agg -> { Set funcs = agg.getAggregateFunctions(); - return !funcs.isEmpty() && funcs.stream().allMatch(f -> f instanceof Count && !f.isDistinct()); + return !funcs.isEmpty() && funcs.stream().allMatch(f -> f instanceof Count && !f.isDistinct() + && (((Count) f).isCountStar() || f.child(0) instanceof Slot)); }) .thenApply(ctx -> { LogicalAggregate>> agg = ctx.root; diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AdjustNullable.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AdjustNullable.java index 808288b8fe3cab..198b6363d9a1e2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AdjustNullable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AdjustNullable.java @@ -31,6 +31,7 @@ import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; import org.apache.doris.nereids.trees.plans.logical.LogicalCTEConsumer; +import org.apache.doris.nereids.trees.plans.logical.LogicalExternalRelation; import org.apache.doris.nereids.trees.plans.logical.LogicalFilter; import org.apache.doris.nereids.trees.plans.logical.LogicalGenerate; import org.apache.doris.nereids.trees.plans.logical.LogicalJoin; @@ -276,6 +277,17 @@ public Plan visitLogicalCTEConsumer(LogicalCTEConsumer cteConsumer, Map replaceMap) { + if (!relation.getConjuncts().isEmpty()) { + relation.getOutputSet().forEach(s -> replaceMap.put(s.getExprId(), s)); + Set conjuncts = updateExpressions(relation.getConjuncts(), replaceMap); + return relation.withConjuncts(conjuncts).recomputeLogicalProperties(); + } else { + return relation; + } + } + private T updateExpression(T input, Map replaceMap) { return (T) input.rewriteDownShortCircuit(e -> e.accept(SlotReferenceReplacer.INSTANCE, replaceMap)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java index 6f067545cee0cc..2cfe4523003879 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/ExtractAndNormalizeWindowExpression.java @@ -17,6 +17,7 @@ package org.apache.doris.nereids.rules.rewrite; +import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.rules.Rule; import org.apache.doris.nereids.rules.RuleType; import org.apache.doris.nereids.trees.expressions.Alias; @@ -25,11 +26,7 @@ import org.apache.doris.nereids.trees.expressions.OrderExpression; import 
org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.WindowExpression; -import org.apache.doris.nereids.trees.expressions.functions.agg.Avg; -import org.apache.doris.nereids.trees.expressions.functions.agg.Max; -import org.apache.doris.nereids.trees.expressions.functions.agg.Min; import org.apache.doris.nereids.trees.expressions.functions.agg.NullableAggregateFunction; -import org.apache.doris.nereids.trees.expressions.functions.agg.Sum; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; import org.apache.doris.nereids.trees.plans.logical.LogicalWindow; @@ -64,13 +61,14 @@ private Plan normalize(LogicalProject project) { if (output instanceof WindowExpression) { WindowExpression windowExpression = (WindowExpression) output; Expression expression = ((WindowExpression) output).getFunction(); - if (expression instanceof Sum || expression instanceof Max - || expression instanceof Min || expression instanceof Avg) { - // sum, max, min and avg in window function should be always nullable - windowExpression = ((WindowExpression) output) - .withFunction( - ((NullableAggregateFunction) expression).withAlwaysNullable(true) - ); + if (expression.containsType(OrderExpression.class)) { + throw new AnalysisException("order by is not supported in " + expression); + } + if (expression instanceof NullableAggregateFunction) { + // NullableAggregateFunction in window function should be always nullable + // Because there may be no data in the window frame, null values will be generated. + windowExpression = ((WindowExpression) output).withFunction( + ((NullableAggregateFunction) expression).withAlwaysNullable(true)); } ImmutableList.Builder nonLiteralPartitionKeys = diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpCorrelatedFilterUnderApplyAggregateProject.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpCorrelatedFilterUnderApplyAggregateProject.java index 309bd9a78b93ce..c8cb9ebe8f5973 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpCorrelatedFilterUnderApplyAggregateProject.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpCorrelatedFilterUnderApplyAggregateProject.java @@ -19,17 +19,22 @@ import org.apache.doris.nereids.rules.Rule; import org.apache.doris.nereids.rules.RuleType; +import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; +import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; import org.apache.doris.nereids.trees.plans.logical.LogicalApply; import org.apache.doris.nereids.trees.plans.logical.LogicalFilter; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.util.ExpressionUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import com.google.common.collect.Sets; import java.util.List; +import java.util.Set; /** * Swap the order of project and filter under agg in correlated subqueries. 
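The next hunk splits the correlated filter's conjuncts before swapping it with the project: conjuncts that reference the apply's correlated slots are pulled up above the new project, while purely local conjuncts stay in a filter beneath it. A minimal, self-contained sketch of that partitioning, with slots modeled as plain strings; the class and helper names here are illustrative only, not Doris APIs:

import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

final class ConjunctSplitSketch {
    // Returns two buckets: "pullUp" for conjuncts touching correlated slots, "keep" for the rest.
    static Map<String, Set<String>> split(Set<String> correlatedSlots,
                                          Map<String, Set<String>> conjunctToSlots) {
        Set<String> pullUp = new LinkedHashSet<>();
        Set<String> keep = new LinkedHashSet<>();
        for (Map.Entry<String, Set<String>> e : conjunctToSlots.entrySet()) {
            boolean touchesCorrelated = e.getValue().stream().anyMatch(correlatedSlots::contains);
            (touchesCorrelated ? pullUp : keep).add(e.getKey());
        }
        return Map.of("pullUp", pullUp, "keep", keep);
    }

    public static void main(String[] args) {
        Set<String> correlated = Set.of("t1.c2");
        Map<String, Set<String>> conjuncts = Map.of(
                "t2.c2 = t1.c2", Set.of("t2.c2", "t1.c2"),  // correlated: pulled above the project
                "t2.c3 > 10", Set.of("t2.c3"));             // local: stays under the project
        System.out.println(split(correlated, conjuncts));
    }
}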
@@ -94,8 +99,22 @@ public List buildRules() { } }); - LogicalProject newProject = project.withProjectsAndChild(newProjects, filter.child()); - LogicalFilter newFilter = new LogicalFilter<>(filter.getConjuncts(), newProject); + Set correlatedSlots = ExpressionUtils.getInputSlotSet(apply.getCorrelationSlot()); + Set pullUpPredicates = Sets.newLinkedHashSet(); + Set filterPredicates = Sets.newLinkedHashSet(); + for (Expression conjunct : filter.getConjuncts()) { + Set conjunctSlots = conjunct.getInputSlots(); + if (Sets.intersection(conjunctSlots, correlatedSlots).isEmpty()) { + filterPredicates.add(conjunct); + } else { + pullUpPredicates.add(conjunct); + } + } + + LogicalProject newProject = project.withProjectsAndChild(newProjects, + filterPredicates.isEmpty() ? filter.child() + : filter.withConjuncts(filterPredicates)); + LogicalFilter newFilter = new LogicalFilter<>(pullUpPredicates, newProject); LogicalAggregate newAgg = agg.withChildren(ImmutableList.of(newFilter)); return (LogicalApply) (apply.withChildren(apply.left(), isRightChildAgg ? newAgg : apply.right().withChildren(newAgg))); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpProjectUnderApply.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpProjectUnderApply.java index 79750d55f6f3c6..b2398ee3b566dc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpProjectUnderApply.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpProjectUnderApply.java @@ -61,9 +61,13 @@ public Rule build() { Plan newCorrelate = apply.withChildren(apply.left(), project.child()); List newProjects = new ArrayList<>(apply.left().getOutput()); if (apply.getSubqueryExpr() instanceof ScalarSubquery) { - Preconditions.checkState(project.getProjects().size() == 1, - "ScalarSubquery should only have one output column"); - newProjects.add(project.getProjects().get(0)); + // unnest correlated scalar subquery may add count(*) and any_value() to project list + // the previous SubqueryToApply rule will make sure of it. So the output column + // may be 1 or 2, we add a check here. 
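The relaxed check below tolerates either one or two output columns because the earlier SubqueryToApply change can rewrite a correlated scalar subquery that has no top-level aggregate into count(*) plus any_value(), guarded by assert_true at runtime. A rough, self-contained sketch of the semantics that guard enforces, in plain Java rather than the Doris planner classes:

import java.util.List;

final class ScalarSubqueryGuardSketch {
    // Emulates count(*) + any_value(col) + assert_true(count <= 1) for one outer row's matches.
    static String scalarValue(List<String> matchedRows) {
        long count = matchedRows.size();                                      // count(*)
        String anyValue = matchedRows.isEmpty() ? null : matchedRows.get(0);  // any_value(col)
        if (count > 1) {
            throw new IllegalStateException("correlated scalar subquery must return only 1 row");
        }
        return anyValue;  // null when the subquery matched nothing
    }

    public static void main(String[] args) {
        System.out.println(scalarValue(List.of("v1")));  // prints v1
        System.out.println(scalarValue(List.of()));      // prints null
        // scalarValue(List.of("v1", "v2")) would throw, mirroring the assert_true added by the rule
    }
}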
+ int size = project.getProjects().size(); + Preconditions.checkState(size == 1 || size == 2, + "ScalarSubquery should only have one or two output column"); + newProjects.addAll(project.getProjects()); } if (apply.isMarkJoin()) { newProjects.add(apply.getMarkJoinSlotReference().get()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughProject.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughProject.java index 5842beaf3d6328..f6f7c2d1100d0b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughProject.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PushDownFilterThroughProject.java @@ -47,14 +47,14 @@ public class PushDownFilterThroughProject implements RewriteRuleFactory { @Override public List buildRules() { return ImmutableList.of( - logicalFilter(logicalProject()) + logicalFilter(logicalProject().whenNot(LogicalProject::containsNoneMovableFunction)) .whenNot(filter -> ExpressionUtils.containsWindowExpression(filter.child().getProjects())) .then(PushDownFilterThroughProject::pushDownFilterThroughProject) .toRule(RuleType.PUSH_DOWN_FILTER_THROUGH_PROJECT), // filter(project(limit)) will change to filter(limit(project)) by PushdownProjectThroughLimit, // then we should change filter(limit(project)) to project(filter(limit)) // TODO maybe we could remove this rule, because translator already support filter(limit(project)) - logicalFilter(logicalLimit(logicalProject())) + logicalFilter(logicalLimit(logicalProject().whenNot(LogicalProject::containsNoneMovableFunction))) .whenNot(filter -> ExpressionUtils.containsWindowExpression(filter.child().child().getProjects()) ) diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java index 4068fc6b064660..126e9041721312 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/ExpressionEstimation.java @@ -243,7 +243,7 @@ public ColumnStatistic visitLiteral(Literal literal, Statistics context) { .setMaxValue(literalVal) .setMinValue(literalVal) .setNdv(1) - .setNumNulls(1) + .setNumNulls(literal.isNullLiteral() ? 
1 : 0) .setAvgSizeByte(1) .setMinExpr(literal.toLegacyLiteral()) .setMaxExpr(literal.toLegacyLiteral()) @@ -274,13 +274,13 @@ public ColumnStatistic visitBinaryArithmetic(BinaryArithmetic binaryArithmetic, int exprResultTypeWidth = binaryArithmetic.getDataType().width(); double dataSize = exprResultTypeWidth * rowCount; if (binaryArithmetic instanceof Add) { - return new ColumnStatisticBuilder().setCount(rowCount).setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) + return new ColumnStatisticBuilder().setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) .setNumNulls(numNulls).setDataSize(dataSize).setMinValue(leftMin + rightMin) .setMaxValue(leftMax + rightMax) .setMinExpr(null).setMaxExpr(null).build(); } if (binaryArithmetic instanceof Subtract) { - return new ColumnStatisticBuilder().setCount(rowCount).setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) + return new ColumnStatisticBuilder().setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) .setNumNulls(numNulls).setDataSize(dataSize).setMinValue(leftMin - rightMax) .setMaxValue(leftMax - rightMin).setMinExpr(null) .setMaxExpr(null).build(); @@ -297,7 +297,7 @@ public ColumnStatistic visitBinaryArithmetic(BinaryArithmetic binaryArithmetic, Math.max(leftMin * rightMin, leftMin * rightMax), leftMax * rightMin), leftMax * rightMax); - return new ColumnStatisticBuilder().setCount(rowCount).setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) + return new ColumnStatisticBuilder().setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) .setNumNulls(numNulls).setDataSize(dataSize).setMinValue(min).setMaxValue(max) .setMaxExpr(null).setMinExpr(null).build(); } @@ -312,14 +312,14 @@ public ColumnStatistic visitBinaryArithmetic(BinaryArithmetic binaryArithmetic, Math.max(leftMin / noneZeroDivisor(rightMin), leftMin / noneZeroDivisor(rightMax)), leftMax / noneZeroDivisor(rightMin)), leftMax / noneZeroDivisor(rightMax)); - return new ColumnStatisticBuilder().setCount(rowCount).setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) + return new ColumnStatisticBuilder().setNdv(ndv).setAvgSizeByte(leftColStats.avgSizeByte) .setNumNulls(numNulls).setDataSize(binaryArithmetic.getDataType().width()).setMinValue(min) .setMaxValue(max).build(); } if (binaryArithmetic instanceof Mod) { double min = -Math.max(Math.abs(rightMin), Math.abs(rightMax)); double max = -min; - return new ColumnStatisticBuilder().setCount(rowCount).setNdv(ndv) + return new ColumnStatisticBuilder().setNdv(ndv) .setAvgSizeByte(exprResultTypeWidth) .setDataSize(dataSize) .setNumNulls(numNulls) @@ -363,8 +363,7 @@ public ColumnStatistic visitMax(Max max, Statistics context) { public ColumnStatistic visitCount(Count count, Statistics context) { double width = count.getDataType().width(); // for scalar agg, ndv and row count will be normalized by 1 in StatsCalculator.computeAggregate() - return new ColumnStatisticBuilder(ColumnStatistic.UNKNOWN).setCount(context.getRowCount()) - .setAvgSizeByte(width).build(); + return new ColumnStatisticBuilder(ColumnStatistic.UNKNOWN).setAvgSizeByte(width).build(); } // TODO: return a proper estimated stat after supports histogram @@ -382,14 +381,14 @@ public ColumnStatistic visitAvg(Avg avg, Statistics context) { @Override public ColumnStatistic visitYear(Year year, Statistics context) { ColumnStatistic childStat = year.child().accept(this, context); + double rowCount = context.getRowCount(); long minYear = 1970; long maxYear = 2038; return new ColumnStatisticBuilder() - .setCount(childStat.count) .setNdv(maxYear - minYear + 1) .setAvgSizeByte(4) 
.setNumNulls(childStat.numNulls) - .setDataSize(4 * childStat.count) + .setDataSize(4 * rowCount) .setMinValue(minYear) .setMaxValue(maxYear).setMinExpr(null).build(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java index a65a07fea30bdf..b3576a0e58e61e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/FilterEstimation.java @@ -323,6 +323,8 @@ private Statistics estimateEqualTo(ComparisonPredicate cp, ColumnStatistic stats selectivity = DEFAULT_INEQUALITY_COEFFICIENT; } else { double ndv = statsForLeft.ndv; + double numNulls = statsForLeft.numNulls; + double rowCount = context.statistics.getRowCount(); if (statsForRight.isUnKnown) { if (ndv >= 1.0) { selectivity = 1.0 / ndv; @@ -338,7 +340,7 @@ private Statistics estimateEqualTo(ComparisonPredicate cp, ColumnStatistic stats } else { selectivity = DEFAULT_INEQUALITY_COEFFICIENT; } - selectivity = getNotNullSelectivity(statsForLeft, selectivity); + selectivity = getNotNullSelectivity(numNulls, rowCount, ndv, selectivity); } } Statistics equalStats = context.statistics.withSel(selectivity); @@ -451,7 +453,8 @@ A not in (1, 2, 3, 100): compareExprStatsBuilder.setNumNulls(0); Statistics estimated = new StatisticsBuilder(context.statistics).build(); ColumnStatistic stats = compareExprStatsBuilder.build(); - selectivity = getNotNullSelectivity(stats, selectivity); + selectivity = getNotNullSelectivity(compareExprStats.numNulls, estimated.getRowCount(), + compareExprStats.ndv, selectivity); estimated = estimated.withSel(selectivity); estimated.addColumnStats(compareExpr, stats); context.addKeyIfSlot(compareExpr); @@ -546,7 +549,7 @@ public Statistics visitIsNull(IsNull isNull, EstimationContext context) { outputRowCount = Math.max(outputRowCount, 1); } ColumnStatisticBuilder colBuilder = new ColumnStatisticBuilder(childColStats); - colBuilder.setCount(outputRowCount).setNumNulls(outputRowCount) + colBuilder.setNumNulls(outputRowCount) .setMaxValue(Double.POSITIVE_INFINITY) .setMinValue(Double.NEGATIVE_INFINITY) .setNdv(0); @@ -597,7 +600,6 @@ private Statistics estimateBinaryComparisonFilter(Expression leftExpr, DataType .setMaxValue(Double.POSITIVE_INFINITY) .setMaxExpr(null) .setNdv(0) - .setCount(0) .setNumNulls(0); } else { leftColumnStatisticBuilder = new ColumnStatisticBuilder(leftStats) @@ -615,9 +617,8 @@ private Statistics estimateBinaryComparisonFilter(Expression leftExpr, DataType } else { sel = Math.max(sel, RANGE_SELECTIVITY_THRESHOLD); } - sel = getNotNullSelectivity(leftStats, sel); + sel = getNotNullSelectivity(leftStats.numNulls, context.statistics.getRowCount(), leftStats.ndv, sel); updatedStatistics = context.statistics.withSel(sel); - leftColumnStatisticBuilder.setCount(updatedStatistics.getRowCount()); } updatedStatistics.addColumnStats(leftExpr, leftColumnStatisticBuilder.build()); context.addKeyIfSlot(leftExpr); @@ -720,36 +721,27 @@ private Statistics estimateColumnLessThanColumn(Expression leftExpr, ColumnStati @Override public Statistics visitLike(Like like, EstimationContext context) { StatisticsBuilder statsBuilder = new StatisticsBuilder(context.statistics); - statsBuilder.setRowCount(context.statistics.getRowCount() * DEFAULT_LIKE_COMPARISON_SELECTIVITY); + double rowCount = context.statistics.getRowCount() * DEFAULT_LIKE_COMPARISON_SELECTIVITY; + statsBuilder.setRowCount(rowCount); if 
(like.left() instanceof Slot) { ColumnStatistic origin = context.statistics.findColumnStatistics(like.left()); Preconditions.checkArgument(origin != null, "col stats not found. slot=%s in %s", like.left().toSql(), like.toSql()); ColumnStatisticBuilder colBuilder = new ColumnStatisticBuilder(origin); - double selectivity = StatsMathUtil.divide(DEFAULT_LIKE_COMPARISON_SELECTIVITY, origin.ndv); - double notNullSel = getNotNullSelectivity(origin, selectivity); - colBuilder.setNdv(origin.ndv * DEFAULT_LIKE_COMPARISON_SELECTIVITY) - .setCount(notNullSel * context.statistics.getRowCount()).setNumNulls(0); + colBuilder.setNdv(origin.ndv * DEFAULT_LIKE_COMPARISON_SELECTIVITY).setNumNulls(0); statsBuilder.putColumnStatistics(like.left(), colBuilder.build()); context.addKeyIfSlot(like.left()); } return statsBuilder.build(); } - private double getNotNullSelectivity(ColumnStatistic stats, double origSel) { - double rowCount = stats.count; - double numNulls = stats.numNulls; - - // comment following check since current rowCount and ndv may be inconsistant - // e.g, rowCount has been reduced by one filter but another filter column's - // ndv and numNull remains originally, which will unexpectedly go into the following - // normalization. - - //if (numNulls > rowCount - ndv) { - // numNulls = rowCount - ndv > 0 ? rowCount - ndv : 0; - //} - double notNullSel = rowCount <= 1.0 ? 1.0 : 1 - Statistics.getValidSelectivity(numNulls / rowCount); + private double getNotNullSelectivity(double origNumNulls, double origRowCount, double origNdv, double origSel) { + if (origNumNulls > origRowCount - origNdv) { + origNumNulls = origRowCount - origNdv > 0 ? origRowCount - origNdv : 0; + } + double notNullSel = origRowCount <= 1.0 ? 1.0 : 1 - Statistics + .getValidSelectivity(origNumNulls / origRowCount); double validSel = origSel * notNullSel; return Statistics.getValidSelectivity(validSel); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/StatsCalculator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/StatsCalculator.java index 5946192a27eff9..bac66f34ae665f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/StatsCalculator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/stats/StatsCalculator.java @@ -182,6 +182,11 @@ public class StatsCalculator extends DefaultPlanVisitor { private CascadesContext cascadesContext; + private StatsCalculator(CascadesContext context) { + this.groupExpression = null; + this.cascadesContext = context; + } + private StatsCalculator(GroupExpression groupExpression, boolean forbidUnknownColStats, Map columnStatisticMap, boolean isPlayNereidsDump, Map cteIdToStats, CascadesContext context) { @@ -205,6 +210,27 @@ public Map getTotalColumnStatisticMap() { return totalColumnStatisticMap; } + /** + * disable join reorder if any table row count is not available. 
+ */ + public static void disableJoinReorderIfTableRowCountNotAvailable( + List scans, CascadesContext context) { + StatsCalculator calculator = new StatsCalculator(context); + for (LogicalOlapScan scan : scans) { + double rowCount = calculator.getOlapTableRowCount(scan); + if (rowCount == -1 && ConnectContext.get() != null) { + try { + ConnectContext.get().getSessionVariable().disableNereidsJoinReorderOnce(); + LOG.info("disable join reorder since row count not available: " + + scan.getTable().getNameWithFullQualifiers()); + } catch (Exception e) { + LOG.info("disableNereidsJoinReorderOnce failed"); + } + return; + } + } + } + /** * estimate stats */ @@ -217,15 +243,6 @@ public static StatsCalculator estimate(GroupExpression groupExpression, boolean return statsCalculator; } - public static StatsCalculator estimate(GroupExpression groupExpression, boolean forbidUnknownColStats, - Map columnStatisticMap, boolean isPlayNereidsDump, CascadesContext context) { - return StatsCalculator.estimate(groupExpression, - forbidUnknownColStats, - columnStatisticMap, - isPlayNereidsDump, - new HashMap<>(), context); - } - // For unit test only public static void estimate(GroupExpression groupExpression, CascadesContext context) { StatsCalculator statsCalculator = new StatsCalculator(groupExpression, false, @@ -364,6 +381,9 @@ private void checkIfUnknownStatsUsedAsKey(StatisticsBuilder builder) { } } + /** + * if the table is not analyzed and BE does not report row count, return -1 + */ private double getOlapTableRowCount(OlapScan olapScan) { OlapTable olapTable = olapScan.getTable(); AnalysisManager analysisManager = Env.getCurrentEnv().getAnalysisManager(); @@ -403,8 +423,7 @@ private Statistics computeOlapScan(OlapScan olapScan) { for (Slot slot : ((Relation) olapScan).getOutput()) { if (derivedStats.findColumnStatistics(slot) == null) { derivedStats.addColumnStats(slot, - new ColumnStatisticBuilder(ColumnStatistic.UNKNOWN) - .setCount(derivedRowCount).build()); + new ColumnStatisticBuilder(ColumnStatistic.UNKNOWN, derivedRowCount).build()); } } return derivedStats; @@ -431,7 +450,7 @@ private Statistics computeOlapScan(OlapScan olapScan) { // get row count from any visible slotReference's colStats for (Slot slot : ((Plan) olapScan).getOutput()) { builder.putColumnStatistics(slot, - new ColumnStatisticBuilder(ColumnStatistic.UNKNOWN).setCount(tableRowCount).build()); + new ColumnStatisticBuilder(ColumnStatistic.UNKNOWN, tableRowCount).build()); } setHasUnknownColStatsInStatementContext(); return builder.setRowCount(tableRowCount).build(); @@ -463,8 +482,8 @@ private Statistics computeOlapScan(OlapScan olapScan) { }); for (SlotReference slot : visibleOutputSlots) { ColumnStatistic cache = getColumnStatsFromPartitionCache(olapScan, slot, selectedPartitionNames); - ColumnStatisticBuilder colStatsBuilder = new ColumnStatisticBuilder(cache); - colStatsBuilder.setCount(selectedPartitionsRowCount); + ColumnStatisticBuilder colStatsBuilder = new ColumnStatisticBuilder(cache, + selectedPartitionsRowCount); colStatsBuilder.normalizeAvgSizeByte(slot); builder.putColumnStatistics(slot, colStatsBuilder.build()); } @@ -478,8 +497,7 @@ private Statistics computeOlapScan(OlapScan olapScan) { // get table level stats for (SlotReference slot : visibleOutputSlots) { ColumnStatistic cache = getColumnStatsFromTableCache((CatalogRelation) olapScan, slot); - ColumnStatisticBuilder colStatsBuilder = new ColumnStatisticBuilder(cache); - colStatsBuilder.setCount(tableRowCount); + ColumnStatisticBuilder colStatsBuilder = new 
ColumnStatisticBuilder(cache, tableRowCount); colStatsBuilder.normalizeAvgSizeByte(slot); builder.putColumnStatistics(slot, colStatsBuilder.build()); } @@ -1062,8 +1080,7 @@ private Statistics computeCatalogRelation(CatalogRelation catalogRelation) { } else { cache = getColumnStatsFromTableCache(catalogRelation, slot); } - ColumnStatisticBuilder colStatsBuilder = new ColumnStatisticBuilder(cache); - colStatsBuilder.setCount(tableRowCount); + ColumnStatisticBuilder colStatsBuilder = new ColumnStatisticBuilder(cache, tableRowCount); builder.putColumnStatistics(slot, colStatsBuilder.build()); } checkIfUnknownStatsUsedAsKey(builder); @@ -1187,7 +1204,6 @@ private Statistics computeRepeat(Repeat repeat) { ColumnStatistic stats = kv.getValue(); ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(stats); columnStatisticBuilder - .setCount(stats.count < 0 ? stats.count : stats.count * groupingSetNum) .setNumNulls(stats.numNulls < 0 ? stats.numNulls : stats.numNulls * groupingSetNum) .setDataSize(stats.dataSize < 0 ? stats.dataSize : stats.dataSize * groupingSetNum); return Pair.of(kv.getKey(), columnStatisticBuilder.build()); @@ -1322,12 +1338,11 @@ private Statistics computeGenerate(Generate generate) { double count = stats.getRowCount() * generate.getGeneratorOutput().size() * statsFactor; Map columnStatsMap = Maps.newHashMap(); for (Map.Entry entry : stats.columnStatistics().entrySet()) { - ColumnStatistic columnStatistic = new ColumnStatisticBuilder(entry.getValue()).setCount(count).build(); + ColumnStatistic columnStatistic = new ColumnStatisticBuilder(entry.getValue()).build(); columnStatsMap.put(entry.getKey(), columnStatistic); } for (Slot output : generate.getGeneratorOutput()) { ColumnStatistic columnStatistic = new ColumnStatisticBuilder() - .setCount(count) .setMinValue(Double.NEGATIVE_INFINITY) .setMaxValue(Double.POSITIVE_INFINITY) .setNdv(count) @@ -1349,8 +1364,7 @@ private Statistics computeWindow(Window windowOperator) { "need WindowExpression, but we meet " + expr); WindowExpression windExpr = (WindowExpression) expr.child(0); ColumnStatisticBuilder colStatsBuilder = new ColumnStatisticBuilder(); - colStatsBuilder.setCount(childStats.getRowCount()) - .setOriginal(null); + colStatsBuilder.setOriginal(null); Double partitionCount = windExpr.getPartitionKeys().stream().map(key -> { ColumnStatistic keyStats = childStats.findColumnStatistics(key); @@ -1365,8 +1379,7 @@ private Statistics computeWindow(Window windowOperator) { if (partitionCount == -1.0) { // partition key stats are all unknown - colStatsBuilder.setCount(childStats.getRowCount()) - .setNdv(1) + colStatsBuilder.setNdv(1) .setMinValue(Double.NEGATIVE_INFINITY) .setMaxValue(Double.POSITIVE_INFINITY); } else { @@ -1411,7 +1424,7 @@ private Statistics computeWindow(Window windowOperator) { private ColumnStatistic unionColumn(ColumnStatistic leftStats, double leftRowCount, ColumnStatistic rightStats, double rightRowCount, DataType dataType) { if (leftStats.isUnKnown() || rightStats.isUnKnown()) { - return new ColumnStatisticBuilder(leftStats).setCount(leftRowCount + rightRowCount).build(); + return new ColumnStatisticBuilder(leftStats).build(); } ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(); columnStatisticBuilder.setMaxValue(Math.max(leftStats.maxValue, rightStats.maxValue)); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExecFunction.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExecFunction.java index 
126449f4b04e34..a5c656d5ffa185 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExecFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExecFunction.java @@ -34,18 +34,4 @@ */ String name(); - /** - * args type - */ - String[] argTypes(); - - /** - * return type - */ - String returnType(); - - /** - * hasVarArgsc - */ - boolean varArgs() default false; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExpressionEvaluator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExpressionEvaluator.java index f3d471b2abedc4..0c612e42ddaf11 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExpressionEvaluator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ExpressionEvaluator.java @@ -18,8 +18,6 @@ package org.apache.doris.nereids.trees.expressions; import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.Type; -import org.apache.doris.common.AnalysisException; import org.apache.doris.nereids.trees.expressions.functions.BoundFunction; import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction; import org.apache.doris.nereids.trees.expressions.functions.executable.DateTimeAcquire; @@ -30,10 +28,10 @@ import org.apache.doris.nereids.trees.expressions.functions.executable.StringArithmetic; import org.apache.doris.nereids.trees.expressions.functions.executable.TimeRoundSeries; import org.apache.doris.nereids.trees.expressions.literal.DateLiteral; +import org.apache.doris.nereids.trees.expressions.literal.DateTimeLiteral; import org.apache.doris.nereids.trees.expressions.literal.Literal; import org.apache.doris.nereids.trees.expressions.literal.NullLiteral; import org.apache.doris.nereids.types.DataType; -import org.apache.doris.nereids.util.TypeCoercionUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMultimap; @@ -41,7 +39,6 @@ import java.lang.reflect.Array; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -52,7 +49,7 @@ public enum ExpressionEvaluator { INSTANCE; - private ImmutableMultimap functions; + private ImmutableMultimap functions; ExpressionEvaluator() { registerFunctions(); @@ -68,23 +65,16 @@ public Expression eval(Expression expression) { } String fnName = null; - DataType[] args = null; DataType ret = expression.getDataType(); if (expression instanceof BinaryArithmetic) { BinaryArithmetic arithmetic = (BinaryArithmetic) expression; fnName = arithmetic.getLegacyOperator().getName(); - args = new DataType[]{arithmetic.left().getDataType(), arithmetic.right().getDataType()}; } else if (expression instanceof TimestampArithmetic) { TimestampArithmetic arithmetic = (TimestampArithmetic) expression; fnName = arithmetic.getFuncName(); - args = new DataType[]{arithmetic.left().getDataType(), arithmetic.right().getDataType()}; } else if (expression instanceof BoundFunction) { BoundFunction function = ((BoundFunction) expression); fnName = function.getName(); - args = new DataType[function.arity()]; - for (int i = 0; i < function.children().size(); i++) { - args[i] = function.child(i).getDataType(); - } } if ((Env.getCurrentEnv().isNullResultWithOneNullParamFunction(fnName))) { @@ -95,22 +85,26 @@ public Expression eval(Expression expression) { } } - return invoke(expression, fnName, args); + return 
invoke(expression, fnName); } - private Expression invoke(Expression expression, String fnName, DataType[] args) { - FunctionSignature signature = new FunctionSignature(fnName, args, null, false); - FunctionInvoker invoker = getFunction(signature); - if (invoker != null) { + private Expression invoke(Expression expression, String fnName) { + Method method = getFunction(fnName, expression.children()); + if (method != null) { try { - if (invoker.getSignature().hasVarArgs()) { - int fixedArgsSize = invoker.getSignature().getArgTypes().length - 1; - int totalSize = expression.children().size(); - Class[] parameterTypes = invoker.getMethod().getParameterTypes(); - Class parameterType = parameterTypes[parameterTypes.length - 1]; + int varSize = method.getParameterTypes().length; + if (varSize == 0) { + return (Literal) method.invoke(null, expression.children().toArray()); + } + boolean hasVarArgs = method.getParameterTypes()[varSize - 1].isArray(); + if (hasVarArgs) { + int fixedArgsSize = varSize - 1; + int inputSize = expression.children().size(); + Class[] parameterTypes = method.getParameterTypes(); + Class parameterType = parameterTypes[varSize - 1]; Class componentType = parameterType.getComponentType(); - Object varArgs = Array.newInstance(componentType, totalSize - fixedArgsSize); - for (int i = fixedArgsSize; i < totalSize; i++) { + Object varArgs = Array.newInstance(componentType, inputSize - fixedArgsSize); + for (int i = fixedArgsSize; i < inputSize; i++) { if (!(expression.children().get(i) instanceof NullLiteral)) { Array.set(varArgs, i - fixedArgsSize, expression.children().get(i)); } @@ -121,59 +115,70 @@ private Expression invoke(Expression expression, String fnName, DataType[] args) } objects[fixedArgsSize] = varArgs; - return invoker.invokeVars(objects); + return (Literal) method.invoke(null, varArgs); } - return invoker.invoke(expression.children()); - } catch (AnalysisException e) { + return (Literal) method.invoke(null, expression.children().toArray()); + } catch (InvocationTargetException | IllegalAccessException | IllegalArgumentException e) { return expression; } } return expression; } - private FunctionInvoker getFunction(FunctionSignature signature) { - Collection functionInvokers = functions.get(signature.getName()); - for (FunctionInvoker candidate : functionInvokers) { - DataType[] candidateTypes = candidate.getSignature().getArgTypes(); - DataType[] expectedTypes = signature.getArgTypes(); + private boolean canDownCastTo(Class expect, Class input) { + if (DateLiteral.class.isAssignableFrom(expect) + || DateTimeLiteral.class.isAssignableFrom(expect)) { + return expect.equals(input); + } + return expect.isAssignableFrom(input); + } - if (candidate.getSignature().hasVarArgs()) { - if (candidateTypes.length > expectedTypes.length) { + private Method getFunction(String fnName, List inputs) { + Collection expectMethods = functions.get(fnName); + for (Method expect : expectMethods) { + boolean match = true; + int varSize = expect.getParameterTypes().length; + if (varSize == 0) { + if (inputs.size() == 0) { + return expect; + } else { continue; } - boolean match = true; - for (int i = 0; i < candidateTypes.length - 1; i++) { - if (!(expectedTypes[i].toCatalogDataType().matchesType(candidateTypes[i].toCatalogDataType()))) { + } + boolean hasVarArgs = expect.getParameterTypes()[varSize - 1].isArray(); + if (hasVarArgs) { + int fixedArgsSize = varSize - 1; + int inputSize = inputs.size(); + if (inputSize <= fixedArgsSize) { + continue; + } + Class[] expectVarTypes = 
expect.getParameterTypes(); + for (int i = 0; i < fixedArgsSize; i++) { + if (!canDownCastTo(expectVarTypes[i], inputs.get(i).getClass())) { match = false; - break; } } - Type varType = candidateTypes[candidateTypes.length - 1].toCatalogDataType(); - for (int i = candidateTypes.length - 1; i < expectedTypes.length; i++) { - if (!(expectedTypes[i].toCatalogDataType().matchesType(varType))) { + Class varArgsType = expectVarTypes[varSize - 1]; + Class varArgType = varArgsType.getComponentType(); + for (int i = fixedArgsSize; i < inputSize; i++) { + if (!canDownCastTo(varArgType, inputs.get(i).getClass())) { match = false; - break; } } - if (match) { - return candidate; - } else { + } else { + int inputSize = inputs.size(); + if (inputSize != varSize) { continue; } - } - if (candidateTypes.length != expectedTypes.length) { - continue; - } - - boolean match = true; - for (int i = 0; i < candidateTypes.length; i++) { - if (!(expectedTypes[i].toCatalogDataType().matchesType(candidateTypes[i].toCatalogDataType()))) { - match = false; - break; + Class[] expectVarTypes = expect.getParameterTypes(); + for (int i = 0; i < varSize; i++) { + if (!canDownCastTo(expectVarTypes[i], inputs.get(i).getClass())) { + match = false; + } } } if (match) { - return candidate; + return expect; } } return null; @@ -183,7 +188,7 @@ private void registerFunctions() { if (functions != null) { return; } - ImmutableMultimap.Builder mapBuilder = new ImmutableMultimap.Builder<>(); + ImmutableMultimap.Builder mapBuilder = new ImmutableMultimap.Builder<>(); List> classes = ImmutableList.of( DateTimeAcquire.class, DateTimeExtractAndTransform.class, @@ -208,92 +213,10 @@ private void registerFunctions() { this.functions = mapBuilder.build(); } - private void registerFEFunction(ImmutableMultimap.Builder mapBuilder, + private void registerFEFunction(ImmutableMultimap.Builder mapBuilder, Method method, ExecFunction annotation) { if (annotation != null) { - String name = annotation.name(); - DataType returnType = DataType.convertFromString(annotation.returnType()); - List argTypes = new ArrayList<>(); - for (String type : annotation.argTypes()) { - argTypes.add(TypeCoercionUtils.replaceDecimalV3WithWildcard(DataType.convertFromString(type))); - } - DataType[] array = new DataType[argTypes.size()]; - for (int i = 0; i < argTypes.size(); i++) { - array[i] = argTypes.get(i); - } - FunctionSignature signature = new FunctionSignature(name, array, returnType, annotation.varArgs()); - mapBuilder.put(name, new FunctionInvoker(method, signature)); - } - } - - /** - * function invoker. 
- */ - public static class FunctionInvoker { - private final Method method; - private final FunctionSignature signature; - - public FunctionInvoker(Method method, FunctionSignature signature) { - this.method = method; - this.signature = signature; - } - - public Method getMethod() { - return method; - } - - public FunctionSignature getSignature() { - return signature; - } - - public Literal invoke(List args) throws AnalysisException { - try { - return (Literal) method.invoke(null, args.toArray()); - } catch (InvocationTargetException | IllegalAccessException | IllegalArgumentException e) { - throw new AnalysisException(e.getLocalizedMessage()); - } - } - - public Literal invokeVars(Object[] args) throws AnalysisException { - try { - return (Literal) method.invoke(null, args); - } catch (InvocationTargetException | IllegalAccessException | IllegalArgumentException e) { - throw new AnalysisException(e.getLocalizedMessage()); - } + mapBuilder.put(annotation.name(), method); } } - - /** - * function signature. - */ - public static class FunctionSignature { - private final String name; - private final DataType[] argTypes; - private final DataType returnType; - private final boolean hasVarArgs; - - public FunctionSignature(String name, DataType[] argTypes, DataType returnType, boolean hasVarArgs) { - this.name = name; - this.argTypes = argTypes; - this.returnType = returnType; - this.hasVarArgs = hasVarArgs; - } - - public DataType[] getArgTypes() { - return argTypes; - } - - public DataType getReturnType() { - return returnType; - } - - public String getName() { - return name; - } - - public boolean hasVarArgs() { - return hasVarArgs; - } - } - } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ScalarSubquery.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ScalarSubquery.java index 88b354ae8214a2..178debe7db83a5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ScalarSubquery.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/ScalarSubquery.java @@ -19,10 +19,17 @@ import org.apache.doris.nereids.exceptions.UnboundException; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.trees.plans.Plan; +import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; +import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.trees.plans.logical.LogicalSort; +import org.apache.doris.nereids.trees.plans.logical.LogicalSubQueryAlias; import org.apache.doris.nereids.types.DataType; import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import java.util.List; import java.util.Objects; @@ -33,20 +40,41 @@ */ public class ScalarSubquery extends SubqueryExpr { + private final boolean hasTopLevelScalarAgg; + public ScalarSubquery(LogicalPlan subquery) { - super(Objects.requireNonNull(subquery, "subquery can not be null")); + this(subquery, ImmutableList.of()); } public ScalarSubquery(LogicalPlan subquery, List correlateSlots) { - this(Objects.requireNonNull(subquery, "subquery can not be null"), - Objects.requireNonNull(correlateSlots, "correlateSlots can not be null"), - Optional.empty()); + this(subquery, correlateSlots, Optional.empty()); } public ScalarSubquery(LogicalPlan subquery, List correlateSlots, Optional typeCoercionExpr) { 
super(Objects.requireNonNull(subquery, "subquery can not be null"), Objects.requireNonNull(correlateSlots, "correlateSlots can not be null"), typeCoercionExpr); + hasTopLevelScalarAgg = findTopLevelScalarAgg(subquery, ImmutableSet.copyOf(correlateSlots)) != null; + } + + public boolean hasTopLevelScalarAgg() { + return hasTopLevelScalarAgg; + } + + /** + * getTopLevelScalarAggFunction + */ + public Optional getTopLevelScalarAggFunction() { + Plan plan = findTopLevelScalarAgg(queryPlan, ImmutableSet.copyOf(correlateSlots)); + if (plan != null) { + LogicalAggregate aggregate = (LogicalAggregate) plan; + Preconditions.checkState(aggregate.getAggregateFunctions().size() == 1, + "in scalar subquery, should only return 1 column 1 row, " + + "but we found multiple columns ", aggregate.getOutputExpressions()); + return Optional.of((NamedExpression) aggregate.getOutputExpressions().get(0)); + } else { + return Optional.empty(); + } } @Override @@ -81,4 +109,30 @@ public Expression withTypeCoercion(DataType dataType) { public ScalarSubquery withSubquery(LogicalPlan subquery) { return new ScalarSubquery(subquery, correlateSlots, typeCoercionExpr); } + + /** + * for correlated subquery, we define top level scalar agg as if it meets the both 2 conditions: + * 1. The agg or its child contains correlated slots + * 2. only project, sort and subquery alias node can be agg's parent + */ + public static Plan findTopLevelScalarAgg(Plan plan, ImmutableSet slots) { + if (plan instanceof LogicalAggregate) { + if (((LogicalAggregate) plan).getGroupByExpressions().isEmpty() && plan.containsSlots(slots)) { + return plan; + } else { + return null; + } + } else if (plan instanceof LogicalProject || plan instanceof LogicalSubQueryAlias + || plan instanceof LogicalSort) { + for (Plan child : plan.children()) { + Plan result = findTopLevelScalarAgg(child, slots); + if (result != null) { + return result; + } + } + return null; + } else { + return null; + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/AlwaysNotNullable.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/AlwaysNotNullable.java index 8fda4d4b020f2b..6b12f9cd642992 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/AlwaysNotNullable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/AlwaysNotNullable.java @@ -17,6 +17,9 @@ package org.apache.doris.nereids.trees.expressions.functions; +import org.apache.doris.nereids.exceptions.AnalysisException; +import org.apache.doris.nereids.trees.expressions.Expression; + /** * nullable is always false. * @@ -27,4 +30,10 @@ public interface AlwaysNotNullable extends ComputeNullable { default boolean nullable() { return false; } + + // return value of this function if the input data is empty. 
+ // for example, count(*) of empty table is 0; + default Expression resultForEmptyInput() { + throw new AnalysisException("should implement resultForEmptyInput() for " + this.getClass()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/ComputePrecisionForRound.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/ComputePrecisionForRound.java index b47804e23ff2e1..b07b7d384d8eeb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/ComputePrecisionForRound.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/ComputePrecisionForRound.java @@ -40,8 +40,10 @@ default FunctionSignature computePrecision(FunctionSignature signature) { // If scale arg is an integer literal, or it is a cast(Integer as Integer) // then we will try to use its value as result scale // In any other cases, we will make sure result decimal has same scale with input. - if ((floatLength.isLiteral() && floatLength.getDataType() instanceof Int32OrLessType) + if ((floatLength.isLiteral() && !floatLength.isNullLiteral() + && floatLength.getDataType() instanceof Int32OrLessType) || (floatLength instanceof Cast && floatLength.child(0).isLiteral() + && !floatLength.child(0).isNullLiteral() && floatLength.child(0).getDataType() instanceof Int32OrLessType)) { if (floatLength instanceof Cast) { scale = ((IntegerLikeLiteral) floatLength.child(0)).getIntValue(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/NotAllowFallback.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/NoneMovableFunction.java similarity index 76% rename from fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/NotAllowFallback.java rename to fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/NoneMovableFunction.java index 72d8c82e5996a9..46d5e65c9211e7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/NotAllowFallback.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/NoneMovableFunction.java @@ -15,11 +15,11 @@ // specific language governing permissions and limitations // under the License. -package org.apache.doris.nereids.trees.plans.commands; +package org.apache.doris.nereids.trees.expressions.functions; /** - * The class that implements this interface does not allow fallback to OriginalPlanner, - * for example, some new features are not implemented by the old parser + * FunctionTrait. 
Means shouldn't push filter through the project with NoneMovableFunction + * and should not prune any NoneMovableFunction */ -public interface NotAllowFallback { +public interface NoneMovableFunction { } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/ArrayAgg.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/ArrayAgg.java index 7e85eafcd2ac73..bc91207e31f3a6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/ArrayAgg.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/ArrayAgg.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.ArrayLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.ArrayType; @@ -30,6 +31,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.util.ArrayList; import java.util.List; /** @@ -64,4 +66,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new ArrayLiteral(new ArrayList<>(), this.getDataType()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapAgg.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapAgg.java index 1d32910e1a9c49..eaf766b908d7af 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapAgg.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapAgg.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.scalar.BitmapEmpty; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.types.BigIntType; import org.apache.doris.nereids.types.BitmapType; @@ -63,4 +64,9 @@ public AggregateFunction withDistinctAndChildren(boolean distinct, List R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new BitmapEmpty(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnion.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnion.java index 3b3a37bb7607ed..cd0756a1c9361f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnion.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnion.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import 
org.apache.doris.nereids.trees.expressions.functions.Function; +import org.apache.doris.nereids.trees.expressions.functions.scalar.BitmapEmpty; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BitmapType; @@ -89,4 +90,9 @@ public Function constructRollUp(Expression param, Expression... varParams) { public boolean canRollUp() { return true; } + + @Override + public Expression resultForEmptyInput() { + return new BitmapEmpty(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionCount.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionCount.java index 08772b06d57447..593c814f22d159 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionCount.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionCount.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.Function; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; @@ -90,4 +91,10 @@ public Function constructRollUp(Expression param, Expression... varParams) { public boolean canRollUp() { return false; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } + } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionInt.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionInt.java index edae2d187e4b1b..2efe1631176c86 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionInt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/BitmapUnionInt.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; @@ -94,4 +95,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectList.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectList.java index 470054aa894b5c..d6cca2d0b90b6d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectList.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectList.java @@ -21,6 +21,7 @@ import 
org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.ArrayLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.ArrayType; @@ -31,6 +32,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.util.ArrayList; import java.util.List; /** @@ -95,4 +97,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new ArrayLiteral(new ArrayList<>(), this.getDataType()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectSet.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectSet.java index 5eeab663fd2356..d9e7e7227c6f86 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectSet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CollectSet.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.ArrayLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.ArrayType; @@ -31,6 +32,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.util.ArrayList; import java.util.List; /** @@ -101,4 +103,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new ArrayLiteral(new ArrayList<>(), this.getDataType()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Count.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Count.java index 2bfcbe91b35f84..10874d47ee3df9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Count.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Count.java @@ -24,6 +24,7 @@ import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.Function; import org.apache.doris.nereids.trees.expressions.functions.window.SupportWindowAnalytic; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.literal.Literal; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; @@ -157,4 +158,9 @@ public Function constructRollUp(Expression param, Expression... 
varParams) { public boolean canRollUp() { return true; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CountByEnum.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CountByEnum.java index 721471add66d80..2a4ee7be3f49c2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CountByEnum.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/CountByEnum.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.StringLiteral; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.StringType; import org.apache.doris.nereids.util.ExpressionUtils; @@ -62,4 +63,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new StringLiteral("[]"); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/GroupArrayIntersect.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/GroupArrayIntersect.java index 3d6216d0d09161..0720d6838bb1b6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/GroupArrayIntersect.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/GroupArrayIntersect.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.ArrayLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.ArrayType; @@ -29,6 +30,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.util.ArrayList; import java.util.List; /** @@ -73,4 +75,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new ArrayLiteral(new ArrayList<>(), this.getDataType()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Histogram.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Histogram.java index 1f0c2d60f15644..6b0a2759823f3a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Histogram.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Histogram.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.SearchSignature; +import 
org.apache.doris.nereids.trees.expressions.literal.VarcharLiteral; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.DoubleType; import org.apache.doris.nereids.types.IntegerType; @@ -112,4 +113,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new VarcharLiteral("{\"num_buckets\":0,\"buckets\":[]}"); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnion.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnion.java index 7f98d1b6c0d78c..b81fad270b0cda 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnion.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnion.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.Function; +import org.apache.doris.nereids.trees.expressions.functions.scalar.HllEmpty; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.DataType; @@ -89,4 +90,9 @@ public Function constructRollUp(Expression param, Expression... varParams) { public boolean canRollUp() { return true; } + + @Override + public Expression resultForEmptyInput() { + return new HllEmpty(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnionAgg.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnionAgg.java index 15d02e73faff75..b14b61b5be07e9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnionAgg.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/HllUnionAgg.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.Function; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; @@ -90,4 +91,9 @@ public Function constructRollUp(Expression param, Expression... 
varParams) { public boolean canRollUp() { return false; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/IntersectCount.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/IntersectCount.java index 17a74d3eac9461..c013b2e8b4c31d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/IntersectCount.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/IntersectCount.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.BitmapIntersectFunction; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; import org.apache.doris.nereids.types.BitmapType; @@ -77,4 +78,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MapAgg.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MapAgg.java index 36cf5ef7edf33d..744d4a23a667e5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MapAgg.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MapAgg.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.MapLiteral; import org.apache.doris.nereids.trees.expressions.shape.BinaryExpression; import org.apache.doris.nereids.types.MapType; import org.apache.doris.nereids.types.coercion.AnyDataType; @@ -29,6 +30,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.util.ArrayList; import java.util.List; /** @@ -69,4 +71,9 @@ public MapAgg withDistinctAndChildren(boolean distinct, List childre public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new MapLiteral(new ArrayList<>(), new ArrayList<>(), this.getDataType()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctCount.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctCount.java index 7287fc5c554ffe..68d31e3e7bd19d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctCount.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctCount.java @@ -23,6 +23,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import 
org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; import org.apache.doris.nereids.types.coercion.AnyDataType; @@ -87,4 +88,9 @@ public boolean mustUseMultiDistinctAgg() { public Expression withMustUseMultiDistinctAgg(boolean mustUseMultiDistinctAgg) { return new MultiDistinctCount(mustUseMultiDistinctAgg, false, children); } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctSum0.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctSum0.java index 628e18e4772ae9..2b0eda06b4264a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctSum0.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/MultiDistinctSum0.java @@ -23,12 +23,19 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ComputePrecisionForSum; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; +import org.apache.doris.nereids.trees.expressions.literal.DecimalV3Literal; +import org.apache.doris.nereids.trees.expressions.literal.DoubleLiteral; +import org.apache.doris.nereids.trees.expressions.literal.LargeIntLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.DataType; +import org.apache.doris.nereids.types.DecimalV3Type; import com.google.common.base.Preconditions; +import java.math.BigDecimal; +import java.math.BigInteger; import java.util.List; /** MultiDistinctSum0 */ @@ -89,4 +96,20 @@ public boolean mustUseMultiDistinctAgg() { public Expression withMustUseMultiDistinctAgg(boolean mustUseMultiDistinctAgg) { return new MultiDistinctSum0(mustUseMultiDistinctAgg, false, children.get(0)); } + + @Override + public Expression resultForEmptyInput() { + DataType dataType = getDataType(); + if (dataType.isBigIntType()) { + return new BigIntLiteral(0); + } else if (dataType.isLargeIntType()) { + return new LargeIntLiteral(new BigInteger("0")); + } else if (dataType.isDecimalV3Type()) { + return new DecimalV3Literal((DecimalV3Type) dataType, new BigDecimal("0")); + } else if (dataType.isDoubleType()) { + return new DoubleLiteral(0); + } else { + return new DoubleLiteral(0); + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Ndv.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Ndv.java index 25e5fb103dab8c..ea90bc587911be 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Ndv.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Ndv.java @@ -24,6 +24,7 @@ import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.Function; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import 
org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; @@ -90,4 +91,9 @@ public Function constructRollUp(Expression param, Expression... varParams) { public boolean canRollUp() { return false; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/PercentileArray.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/PercentileArray.java index efc2ef0304f67c..b4d7467e4c00b7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/PercentileArray.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/PercentileArray.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.ArrayLiteral; import org.apache.doris.nereids.trees.expressions.shape.BinaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.ArrayType; @@ -34,6 +35,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.util.ArrayList; import java.util.List; /** @@ -86,4 +88,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new ArrayLiteral(new ArrayList<>(), this.getDataType()); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/QuantileUnion.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/QuantileUnion.java index fba37528fd8d4c..3d0729775a5cec 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/QuantileUnion.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/QuantileUnion.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.scalar.QuantileStateEmpty; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.DataType; @@ -83,4 +84,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new QuantileStateEmpty(); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/SequenceCount.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/SequenceCount.java index 5bbf0cf0b43127..7af112ef8e800f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/SequenceCount.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/SequenceCount.java @@ -21,6 +21,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; import org.apache.doris.nereids.types.BooleanType; @@ -84,4 +85,9 @@ public R accept(ExpressionVisitor visitor, C context) { public List getSignatures() { return SIGNATURES; } + + @Override + public Expression resultForEmptyInput() { + return new BigIntLiteral(0); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Sum0.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Sum0.java index 1f63c53dabc004..fd052a69c0e90e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Sum0.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/agg/Sum0.java @@ -25,6 +25,10 @@ import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; import org.apache.doris.nereids.trees.expressions.functions.Function; import org.apache.doris.nereids.trees.expressions.functions.window.SupportWindowAnalytic; +import org.apache.doris.nereids.trees.expressions.literal.BigIntLiteral; +import org.apache.doris.nereids.trees.expressions.literal.DecimalV3Literal; +import org.apache.doris.nereids.trees.expressions.literal.DoubleLiteral; +import org.apache.doris.nereids.trees.expressions.literal.LargeIntLiteral; import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BigIntType; @@ -41,6 +45,8 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import java.math.BigDecimal; +import java.math.BigInteger; import java.util.List; /** @@ -127,4 +133,20 @@ public Function constructRollUp(Expression param, Expression... 
varParams) { public boolean canRollUp() { return true; } + + @Override + public Expression resultForEmptyInput() { + DataType dataType = getDataType(); + if (dataType.isBigIntType()) { + return new BigIntLiteral(0); + } else if (dataType.isLargeIntType()) { + return new LargeIntLiteral(new BigInteger("0")); + } else if (dataType.isDecimalV3Type()) { + return new DecimalV3Literal((DecimalV3Type) dataType, new BigDecimal("0")); + } else if (dataType.isDoubleType()) { + return new DoubleLiteral(0); + } else { + return new DoubleLiteral(0); + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeAcquire.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeAcquire.java index 17403bd83c0770..98d2ebfaf769ba 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeAcquire.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeAcquire.java @@ -36,12 +36,12 @@ public class DateTimeAcquire { /** * date acquire function: now */ - @ExecFunction(name = "now", argTypes = {}, returnType = "DATETIME") + @ExecFunction(name = "now") public static Expression now() { return DateTimeLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); } - @ExecFunction(name = "now", argTypes = {"INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "now") public static Expression now(IntegerLiteral precision) { return DateTimeV2Literal.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone()), precision.getValue()); @@ -50,12 +50,12 @@ public static Expression now(IntegerLiteral precision) { /** * date acquire function: current_timestamp */ - @ExecFunction(name = "current_timestamp", argTypes = {}, returnType = "DATETIME") + @ExecFunction(name = "current_timestamp") public static Expression currentTimestamp() { return DateTimeLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); } - @ExecFunction(name = "current_timestamp", argTypes = {"INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "current_timestamp") public static Expression currentTimestamp(IntegerLiteral precision) { return DateTimeV2Literal.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone()), precision.getValue()); } @@ -63,12 +63,12 @@ public static Expression currentTimestamp(IntegerLiteral precision) { /** * date acquire function: localtime/localtimestamp */ - @ExecFunction(name = "localtime", argTypes = {}, returnType = "DATETIME") + @ExecFunction(name = "localtime") public static Expression localTime() { return DateTimeLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); } - @ExecFunction(name = "localtimestamp", argTypes = {}, returnType = "DATETIME") + @ExecFunction(name = "localtimestamp") public static Expression localTimestamp() { return DateTimeV2Literal.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); } @@ -76,12 +76,12 @@ public static Expression localTimestamp() { /** * date acquire function: current_date */ - @ExecFunction(name = "curdate", argTypes = {}, returnType = "DATE") + @ExecFunction(name = "curdate") public static Expression curDate() { return DateLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); } - @ExecFunction(name = "current_date", argTypes = {}, returnType = "DATE") + @ExecFunction(name = "current_date") public static Expression currentDate() { return 
DateLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); } @@ -90,12 +90,12 @@ public static Expression currentDate() { // /** // * date acquire function: current_time // */ - // @ExecFunction(name = "curtime", argTypes = {}, returnType = "TIME") + // @ExecFunction(name = "curtime") // public static Expression curTime() { // return DateTimeLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); // } - // @ExecFunction(name = "current_time", argTypes = {}, returnType = "TIME") + // @ExecFunction(name = "current_time") // public static Expression currentTime() { // return DateTimeLiteral.fromJavaDateType(LocalDateTime.now(DateUtils.getTimeZone())); // } @@ -103,7 +103,7 @@ public static Expression currentDate() { /** * date transformation function: unix_timestamp */ - @ExecFunction(name = "unix_timestamp", argTypes = {}, returnType = "INT") + @ExecFunction(name = "unix_timestamp") public static Expression unixTimestamp() { return new IntegerLiteral((int) (System.currentTimeMillis() / 1000L)); } @@ -111,7 +111,7 @@ public static Expression unixTimestamp() { /** * date transformation function: utc_timestamp */ - @ExecFunction(name = "utc_timestamp", argTypes = {}, returnType = "INT") + @ExecFunction(name = "utc_timestamp") public static Expression utcTimestamp() { return DateTimeLiteral.fromJavaDateType(LocalDateTime.now(ZoneId.of("UTC+0"))); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeArithmetic.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeArithmetic.java index c10181a1040db4..15588871016b1f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeArithmetic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeArithmetic.java @@ -36,22 +36,22 @@ public class DateTimeArithmetic { /** * datetime arithmetic function date-add. */ - @ExecFunction(name = "date_add", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "date_add") public static Expression dateAdd(DateLiteral date, IntegerLiteral day) { return daysAdd(date, day); } - @ExecFunction(name = "date_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "date_add") public static Expression dateAdd(DateTimeLiteral date, IntegerLiteral day) { return daysAdd(date, day); } - @ExecFunction(name = "date_add", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "date_add") public static Expression dateAdd(DateV2Literal date, IntegerLiteral day) { return daysAdd(date, day); } - @ExecFunction(name = "date_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "date_add") public static Expression dateAdd(DateTimeV2Literal date, IntegerLiteral day) { return daysAdd(date, day); } @@ -59,22 +59,22 @@ public static Expression dateAdd(DateTimeV2Literal date, IntegerLiteral day) { /** * datetime arithmetic function date-sub. 
*/ - @ExecFunction(name = "date_sub", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "date_sub") public static Expression dateSub(DateLiteral date, IntegerLiteral day) { return dateAdd(date, new IntegerLiteral(-day.getValue())); } - @ExecFunction(name = "date_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "date_sub") public static Expression dateSub(DateTimeLiteral date, IntegerLiteral day) { return dateAdd(date, new IntegerLiteral(-day.getValue())); } - @ExecFunction(name = "date_sub", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "date_sub") public static Expression dateSub(DateV2Literal date, IntegerLiteral day) { return dateAdd(date, new IntegerLiteral(-day.getValue())); } - @ExecFunction(name = "date_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "date_sub") public static Expression dateSub(DateTimeV2Literal date, IntegerLiteral day) { return dateAdd(date, new IntegerLiteral(-day.getValue())); } @@ -82,22 +82,22 @@ public static Expression dateSub(DateTimeV2Literal date, IntegerLiteral day) { /** * datetime arithmetic function years-add. */ - @ExecFunction(name = "years_add", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "years_add") public static Expression yearsAdd(DateLiteral date, IntegerLiteral year) { return date.plusYears(year.getValue()); } - @ExecFunction(name = "years_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "years_add") public static Expression yearsAdd(DateTimeLiteral date, IntegerLiteral year) { return date.plusYears(year.getValue()); } - @ExecFunction(name = "years_add", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "years_add") public static Expression yearsAdd(DateV2Literal date, IntegerLiteral year) { return date.plusYears(year.getValue()); } - @ExecFunction(name = "years_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "years_add") public static Expression yearsAdd(DateTimeV2Literal date, IntegerLiteral year) { return date.plusYears(year.getValue()); } @@ -105,22 +105,22 @@ public static Expression yearsAdd(DateTimeV2Literal date, IntegerLiteral year) { /** * datetime arithmetic function months-add. */ - @ExecFunction(name = "months_add", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "months_add") public static Expression monthsAdd(DateLiteral date, IntegerLiteral month) { return date.plusMonths(month.getValue()); } - @ExecFunction(name = "months_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "months_add") public static Expression monthsAdd(DateTimeLiteral date, IntegerLiteral month) { return date.plusMonths(month.getValue()); } - @ExecFunction(name = "months_add", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "months_add") public static Expression monthsAdd(DateV2Literal date, IntegerLiteral month) { return date.plusMonths(month.getValue()); } - @ExecFunction(name = "months_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "months_add") public static Expression monthsAdd(DateTimeV2Literal date, IntegerLiteral month) { return date.plusMonths(month.getValue()); } @@ -128,22 +128,22 @@ public static Expression monthsAdd(DateTimeV2Literal date, IntegerLiteral month) /** * datetime arithmetic function weeks-add. 
*/ - @ExecFunction(name = "weeks_add", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "weeks_add") public static Expression weeksAdd(DateLiteral date, IntegerLiteral weeks) { return date.plusWeeks(weeks.getValue()); } - @ExecFunction(name = "weeks_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "weeks_add") public static Expression weeksAdd(DateTimeLiteral date, IntegerLiteral weeks) { return date.plusWeeks(weeks.getValue()); } - @ExecFunction(name = "weeks_add", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "weeks_add") public static Expression weeksAdd(DateV2Literal date, IntegerLiteral weeks) { return date.plusWeeks(weeks.getValue()); } - @ExecFunction(name = "weeks_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "weeks_add") public static Expression weeksAdd(DateTimeV2Literal date, IntegerLiteral weeks) { return date.plusWeeks(weeks.getValue()); } @@ -151,22 +151,22 @@ public static Expression weeksAdd(DateTimeV2Literal date, IntegerLiteral weeks) /** * datetime arithmetic function days-add. */ - @ExecFunction(name = "days_add", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "days_add") public static Expression daysAdd(DateLiteral date, IntegerLiteral day) { return date.plusDays(day.getValue()); } - @ExecFunction(name = "days_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "days_add") public static Expression daysAdd(DateTimeLiteral date, IntegerLiteral day) { return date.plusDays(day.getValue()); } - @ExecFunction(name = "days_add", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "days_add") public static Expression daysAdd(DateV2Literal date, IntegerLiteral day) { return date.plusDays(day.getValue()); } - @ExecFunction(name = "days_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "days_add") public static Expression daysAdd(DateTimeV2Literal date, IntegerLiteral day) { return date.plusDays(day.getValue()); } @@ -174,12 +174,12 @@ public static Expression daysAdd(DateTimeV2Literal date, IntegerLiteral day) { /** * datetime arithmetic function hours-add. */ - @ExecFunction(name = "hours_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "hours_add") public static Expression hoursAdd(DateTimeLiteral date, IntegerLiteral hour) { return date.plusHours(hour.getValue()); } - @ExecFunction(name = "hours_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hours_add") public static Expression hoursAdd(DateTimeV2Literal date, IntegerLiteral hour) { return date.plusHours(hour.getValue()); } @@ -187,12 +187,12 @@ public static Expression hoursAdd(DateTimeV2Literal date, IntegerLiteral hour) { /** * datetime arithmetic function minutes-add. 
*/ - @ExecFunction(name = "minutes_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "minutes_add") public static Expression minutesAdd(DateTimeLiteral date, IntegerLiteral minute) { return date.plusMinutes(minute.getValue()); } - @ExecFunction(name = "minutes_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minutes_add") public static Expression minutesAdd(DateTimeV2Literal date, IntegerLiteral minute) { return date.plusMinutes(minute.getValue()); } @@ -200,12 +200,12 @@ public static Expression minutesAdd(DateTimeV2Literal date, IntegerLiteral minut /** * datetime arithmetic function seconds-add. */ - @ExecFunction(name = "seconds_add", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "seconds_add") public static Expression secondsAdd(DateTimeLiteral date, IntegerLiteral second) { return date.plusSeconds(second.getValue()); } - @ExecFunction(name = "seconds_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "seconds_add") public static Expression secondsAdd(DateTimeV2Literal date, IntegerLiteral second) { return date.plusSeconds(second.getValue()); } @@ -213,7 +213,7 @@ public static Expression secondsAdd(DateTimeV2Literal date, IntegerLiteral secon /** * datetime arithmetic function microseconds-add. */ - @ExecFunction(name = "microseconds_add", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "microseconds_add") public static Expression microSecondsAdd(DateTimeV2Literal date, IntegerLiteral microSecond) { return date.plusMicroSeconds(microSecond.getValue()); } @@ -221,7 +221,7 @@ public static Expression microSecondsAdd(DateTimeV2Literal date, IntegerLiteral /** * datetime arithmetic function microseconds_sub. */ - @ExecFunction(name = "microseconds_sub", argTypes = { "DATETIMEV2", "INT" }, returnType = "DATETIMEV2") + @ExecFunction(name = "microseconds_sub") public static Expression microSecondsSub(DateTimeV2Literal date, IntegerLiteral microSecond) { return date.plusMicroSeconds(-microSecond.getValue()); } @@ -229,7 +229,7 @@ public static Expression microSecondsSub(DateTimeV2Literal date, IntegerLiteral /** * datetime arithmetic function milliseconds_add. */ - @ExecFunction(name = "milliseconds_add", argTypes = { "DATETIMEV2", "INT" }, returnType = "DATETIMEV2") + @ExecFunction(name = "milliseconds_add") public static Expression milliSecondsAdd(DateTimeV2Literal date, IntegerLiteral milliSecond) { return date.plusMilliSeconds(milliSecond.getValue()); } @@ -237,7 +237,7 @@ public static Expression milliSecondsAdd(DateTimeV2Literal date, IntegerLiteral /** * datetime arithmetic function milliseconds_sub. */ - @ExecFunction(name = "milliseconds_sub", argTypes = { "DATETIMEV2", "INT" }, returnType = "DATETIMEV2") + @ExecFunction(name = "milliseconds_sub") public static Expression milliSecondsSub(DateTimeV2Literal date, IntegerLiteral milliSecond) { return date.plusMilliSeconds(-milliSecond.getValue()); } @@ -245,22 +245,22 @@ public static Expression milliSecondsSub(DateTimeV2Literal date, IntegerLiteral /** * datetime arithmetic function years-sub. 
*/ - @ExecFunction(name = "years_sub", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "years_sub") public static Expression yearsSub(DateLiteral date, IntegerLiteral year) { return yearsAdd(date, new IntegerLiteral(-year.getValue())); } - @ExecFunction(name = "years_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "years_sub") public static Expression yearsSub(DateTimeLiteral date, IntegerLiteral year) { return yearsAdd(date, new IntegerLiteral(-year.getValue())); } - @ExecFunction(name = "years_sub", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "years_sub") public static Expression yearsSub(DateV2Literal date, IntegerLiteral year) { return yearsAdd(date, new IntegerLiteral(-year.getValue())); } - @ExecFunction(name = "years_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "years_sub") public static Expression yearsSub(DateTimeV2Literal date, IntegerLiteral year) { return yearsAdd(date, new IntegerLiteral(-year.getValue())); } @@ -268,22 +268,22 @@ public static Expression yearsSub(DateTimeV2Literal date, IntegerLiteral year) { /** * datetime arithmetic function months-sub */ - @ExecFunction(name = "months_sub", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "months_sub") public static Expression monthsSub(DateLiteral date, IntegerLiteral month) { return monthsAdd(date, new IntegerLiteral(-month.getValue())); } - @ExecFunction(name = "months_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "months_sub") public static Expression monthsSub(DateTimeLiteral date, IntegerLiteral month) { return monthsAdd(date, new IntegerLiteral(-month.getValue())); } - @ExecFunction(name = "months_sub", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "months_sub") public static Expression monthsSub(DateV2Literal date, IntegerLiteral month) { return monthsAdd(date, new IntegerLiteral(-month.getValue())); } - @ExecFunction(name = "months_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "months_sub") public static Expression monthsSub(DateTimeV2Literal date, IntegerLiteral month) { return monthsAdd(date, new IntegerLiteral(-month.getValue())); } @@ -291,22 +291,22 @@ public static Expression monthsSub(DateTimeV2Literal date, IntegerLiteral month) /** * datetime arithmetic function weeks-sub. 
*/ - @ExecFunction(name = "weeks_sub", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "weeks_sub") public static Expression weeksSub(DateLiteral date, IntegerLiteral weeks) { return date.plusWeeks(-weeks.getValue()); } - @ExecFunction(name = "weeks_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "weeks_sub") public static Expression weeksSub(DateTimeLiteral date, IntegerLiteral weeks) { return date.plusWeeks(-weeks.getValue()); } - @ExecFunction(name = "weeks_sub", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "weeks_sub") public static Expression weeksSub(DateV2Literal date, IntegerLiteral weeks) { return date.plusWeeks(-weeks.getValue()); } - @ExecFunction(name = "weeks_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "weeks_sub") public static Expression weeksSub(DateTimeV2Literal date, IntegerLiteral weeks) { return date.plusWeeks(-weeks.getValue()); } @@ -314,22 +314,22 @@ public static Expression weeksSub(DateTimeV2Literal date, IntegerLiteral weeks) /** * datetime arithmetic function days-sub */ - @ExecFunction(name = "days_sub", argTypes = {"DATE", "INT"}, returnType = "DATE") + @ExecFunction(name = "days_sub") public static Expression daysSub(DateLiteral date, IntegerLiteral day) { return daysAdd(date, new IntegerLiteral(-day.getValue())); } - @ExecFunction(name = "days_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "days_sub") public static Expression daysSub(DateTimeLiteral date, IntegerLiteral day) { return daysAdd(date, new IntegerLiteral(-day.getValue())); } - @ExecFunction(name = "days_sub", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "days_sub") public static Expression daysSub(DateV2Literal date, IntegerLiteral day) { return daysAdd(date, new IntegerLiteral(-day.getValue())); } - @ExecFunction(name = "days_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "days_sub") public static Expression daysSub(DateTimeV2Literal date, IntegerLiteral day) { return daysAdd(date, new IntegerLiteral(-day.getValue())); } @@ -337,12 +337,12 @@ public static Expression daysSub(DateTimeV2Literal date, IntegerLiteral day) { /** * datetime arithmetic function hours-sub */ - @ExecFunction(name = "hours_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "hours_sub") public static Expression hoursSub(DateTimeLiteral date, IntegerLiteral hour) { return hoursAdd(date, new IntegerLiteral(-hour.getValue())); } - @ExecFunction(name = "hours_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hours_sub") public static Expression hoursSub(DateTimeV2Literal date, IntegerLiteral hour) { return hoursAdd(date, new IntegerLiteral(-hour.getValue())); } @@ -350,12 +350,12 @@ public static Expression hoursSub(DateTimeV2Literal date, IntegerLiteral hour) { /** * datetime arithmetic function minutes-sub */ - @ExecFunction(name = "minutes_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "minutes_sub") public static Expression minutesSub(DateTimeLiteral date, IntegerLiteral minute) { return minutesAdd(date, new IntegerLiteral(-minute.getValue())); } - @ExecFunction(name = "minutes_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minutes_sub") public static Expression minutesSub(DateTimeV2Literal date, IntegerLiteral minute) { 
return minutesAdd(date, new IntegerLiteral(-minute.getValue())); } @@ -363,12 +363,12 @@ public static Expression minutesSub(DateTimeV2Literal date, IntegerLiteral minut /** * datetime arithmetic function seconds-sub */ - @ExecFunction(name = "seconds_sub", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "seconds_sub") public static Expression secondsSub(DateTimeLiteral date, IntegerLiteral second) { return secondsAdd(date, new IntegerLiteral(-second.getValue())); } - @ExecFunction(name = "seconds_sub", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "seconds_sub") public static Expression secondsSub(DateTimeV2Literal date, IntegerLiteral second) { return secondsAdd(date, new IntegerLiteral(-second.getValue())); } @@ -376,27 +376,27 @@ public static Expression secondsSub(DateTimeV2Literal date, IntegerLiteral secon /** * datetime arithmetic function datediff */ - @ExecFunction(name = "datediff", argTypes = {"DATETIME", "DATETIME"}, returnType = "INT") + @ExecFunction(name = "datediff") public static Expression dateDiff(DateTimeLiteral date1, DateTimeLiteral date2) { return new IntegerLiteral(dateDiff(date1.toJavaDateType(), date2.toJavaDateType())); } - @ExecFunction(name = "datediff", argTypes = {"DATEV2", "DATEV2"}, returnType = "INT") + @ExecFunction(name = "datediff") public static Expression dateDiff(DateV2Literal date1, DateV2Literal date2) { return new IntegerLiteral(dateDiff(date1.toJavaDateType(), date2.toJavaDateType())); } - @ExecFunction(name = "datediff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "INT") + @ExecFunction(name = "datediff") public static Expression dateDiff(DateV2Literal date1, DateTimeV2Literal date2) { return new IntegerLiteral(dateDiff(date1.toJavaDateType(), date2.toJavaDateType())); } - @ExecFunction(name = "datediff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "INT") + @ExecFunction(name = "datediff") public static Expression dateDiff(DateTimeV2Literal date1, DateV2Literal date2) { return new IntegerLiteral(dateDiff(date1.toJavaDateType(), date2.toJavaDateType())); } - @ExecFunction(name = "datediff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "INT") + @ExecFunction(name = "datediff") public static Expression dateDiff(DateTimeV2Literal date1, DateTimeV2Literal date2) { return new IntegerLiteral(dateDiff(date1.toJavaDateType(), date2.toJavaDateType())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeExtractAndTransform.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeExtractAndTransform.java index 9742602a07a249..440f93cac598a2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeExtractAndTransform.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/DateTimeExtractAndTransform.java @@ -67,7 +67,7 @@ public class DateTimeExtractAndTransform { /** * datetime arithmetic function date-v2 */ - @ExecFunction(name = "datev2", argTypes = {"DATETIMEV2"}, returnType = "DATEV2") + @ExecFunction(name = "datev2") public static Expression dateV2(DateTimeV2Literal dateTime) { return new DateV2Literal(dateTime.getYear(), dateTime.getMonth(), dateTime.getDay()); } @@ -75,22 +75,22 @@ public static Expression dateV2(DateTimeV2Literal dateTime) { /** * Executable datetime extract year */ - @ExecFunction(name = "year", argTypes = {"DATE"}, returnType = "SMALLINT") 
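+    // argTypes and returnType are dropped from @ExecFunction here (and throughout this change);
+    // the executable signature is presumably resolved from the annotated method's parameter and return types instead.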
+ @ExecFunction(name = "year") public static Expression year(DateLiteral date) { return new SmallIntLiteral(((short) date.getYear())); } - @ExecFunction(name = "year", argTypes = {"DATETIME"}, returnType = "SMALLINT") + @ExecFunction(name = "year") public static Expression year(DateTimeLiteral date) { return new SmallIntLiteral(((short) date.getYear())); } - @ExecFunction(name = "year", argTypes = {"DATEV2"}, returnType = "SMALLINT") + @ExecFunction(name = "year") public static Expression year(DateV2Literal date) { return new SmallIntLiteral(((short) date.getYear())); } - @ExecFunction(name = "year", argTypes = {"DATETIMEV2"}, returnType = "SMALLINT") + @ExecFunction(name = "year") public static Expression year(DateTimeV2Literal date) { return new SmallIntLiteral(((short) date.getYear())); } @@ -98,22 +98,22 @@ public static Expression year(DateTimeV2Literal date) { /** * Executable datetime extract quarter */ - @ExecFunction(name = "quarter", argTypes = {"DATE"}, returnType = "TINYINT") + @ExecFunction(name = "quarter") public static Expression quarter(DateLiteral date) { return new TinyIntLiteral((byte) (((byte) date.getMonth() - 1) / 3 + 1)); } - @ExecFunction(name = "quarter", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "quarter") public static Expression quarter(DateTimeLiteral date) { return new TinyIntLiteral((byte) ((date.getMonth() - 1) / 3 + 1)); } - @ExecFunction(name = "quarter", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "quarter") public static Expression quarter(DateV2Literal date) { return new TinyIntLiteral((byte) ((date.getMonth() - 1) / 3 + 1)); } - @ExecFunction(name = "quarter", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "quarter") public static Expression quarter(DateTimeV2Literal date) { return new TinyIntLiteral((byte) ((date.getMonth() - 1) / 3 + 1)); } @@ -121,22 +121,22 @@ public static Expression quarter(DateTimeV2Literal date) { /** * Executable datetime extract month */ - @ExecFunction(name = "month", argTypes = {"DATE"}, returnType = "TINYINT") + @ExecFunction(name = "month") public static Expression month(DateLiteral date) { return new TinyIntLiteral((byte) date.getMonth()); } - @ExecFunction(name = "month", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "month") public static Expression month(DateTimeLiteral date) { return new TinyIntLiteral((byte) date.getMonth()); } - @ExecFunction(name = "month", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "month") public static Expression month(DateV2Literal date) { return new TinyIntLiteral((byte) date.getMonth()); } - @ExecFunction(name = "month", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "month") public static Expression month(DateTimeV2Literal date) { return new TinyIntLiteral((byte) date.getMonth()); } @@ -144,22 +144,22 @@ public static Expression month(DateTimeV2Literal date) { /** * Executable datetime extract day */ - @ExecFunction(name = "day", argTypes = {"DATE"}, returnType = "TINYINT") + @ExecFunction(name = "day") public static Expression day(DateLiteral date) { return new TinyIntLiteral((byte) date.getDay()); } - @ExecFunction(name = "day", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "day") public static Expression day(DateTimeLiteral date) { return new TinyIntLiteral((byte) date.getDay()); } - @ExecFunction(name = "day", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "day") public static 
Expression day(DateV2Literal date) { return new TinyIntLiteral((byte) date.getDay()); } - @ExecFunction(name = "day", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "day") public static Expression day(DateTimeV2Literal date) { return new TinyIntLiteral((byte) date.getDay()); } @@ -167,12 +167,12 @@ public static Expression day(DateTimeV2Literal date) { /** * Executable datetime extract hour */ - @ExecFunction(name = "hour", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "hour") public static Expression hour(DateTimeLiteral date) { return new TinyIntLiteral(((byte) date.getHour())); } - @ExecFunction(name = "hour", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "hour") public static Expression hour(DateTimeV2Literal date) { return new TinyIntLiteral(((byte) date.getHour())); } @@ -180,12 +180,12 @@ public static Expression hour(DateTimeV2Literal date) { /** * Executable datetime extract hour */ - @ExecFunction(name = "minute", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "minute") public static Expression minute(DateTimeLiteral date) { return new TinyIntLiteral(((byte) date.getMinute())); } - @ExecFunction(name = "minute", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "minute") public static Expression minute(DateTimeV2Literal date) { return new TinyIntLiteral(((byte) date.getMinute())); } @@ -193,12 +193,12 @@ public static Expression minute(DateTimeV2Literal date) { /** * Executable datetime extract second */ - @ExecFunction(name = "second", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "second") public static Expression second(DateTimeLiteral date) { return new TinyIntLiteral(((byte) date.getSecond())); } - @ExecFunction(name = "second", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "second") public static Expression second(DateTimeV2Literal date) { return new TinyIntLiteral(((byte) date.getSecond())); } @@ -206,7 +206,7 @@ public static Expression second(DateTimeV2Literal date) { /** * Executable datetime extract microsecond */ - @ExecFunction(name = "microsecond", argTypes = {"DATETIMEV2"}, returnType = "INT") + @ExecFunction(name = "microsecond") public static Expression microsecond(DateTimeV2Literal date) { return new IntegerLiteral(((int) date.getMicroSecond())); } @@ -214,22 +214,22 @@ public static Expression microsecond(DateTimeV2Literal date) { /** * Executable datetime extract dayofyear */ - @ExecFunction(name = "dayofyear", argTypes = {"DATE"}, returnType = "SMALLINT") + @ExecFunction(name = "dayofyear") public static Expression dayOfYear(DateLiteral date) { return new SmallIntLiteral((short) date.getDayOfYear()); } - @ExecFunction(name = "dayofyear", argTypes = {"DATETIME"}, returnType = "SMALLINT") + @ExecFunction(name = "dayofyear") public static Expression dayOfYear(DateTimeLiteral date) { return new SmallIntLiteral((short) date.getDayOfYear()); } - @ExecFunction(name = "dayofyear", argTypes = {"DATEV2"}, returnType = "SMALLINT") + @ExecFunction(name = "dayofyear") public static Expression dayOfYear(DateV2Literal date) { return new SmallIntLiteral((short) date.getDayOfYear()); } - @ExecFunction(name = "dayofyear", argTypes = {"DATETIMEV2"}, returnType = "SMALLINT") + @ExecFunction(name = "dayofyear") public static Expression dayOfYear(DateTimeV2Literal date) { return new SmallIntLiteral((short) date.getDayOfYear()); } @@ -237,22 +237,22 @@ public static Expression 
dayOfYear(DateTimeV2Literal date) { /** * Executable datetime extract dayofmonth */ - @ExecFunction(name = "dayofmonth", argTypes = {"DATE"}, returnType = "TINYINT") + @ExecFunction(name = "dayofmonth") public static Expression dayOfMonth(DateLiteral date) { return new TinyIntLiteral((byte) date.toJavaDateType().getDayOfMonth()); } - @ExecFunction(name = "dayofmonth", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "dayofmonth") public static Expression dayOfMonth(DateTimeLiteral date) { return new TinyIntLiteral((byte) date.toJavaDateType().getDayOfMonth()); } - @ExecFunction(name = "dayofmonth", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "dayofmonth") public static Expression dayOfMonth(DateV2Literal date) { return new TinyIntLiteral((byte) date.toJavaDateType().getDayOfMonth()); } - @ExecFunction(name = "dayofmonth", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "dayofmonth") public static Expression dayOfMonth(DateTimeV2Literal date) { return new TinyIntLiteral((byte) date.toJavaDateType().getDayOfMonth()); } @@ -260,22 +260,22 @@ public static Expression dayOfMonth(DateTimeV2Literal date) { /** * Executable datetime extract dayofweek */ - @ExecFunction(name = "dayofweek", argTypes = {"DATE"}, returnType = "TINYINT") + @ExecFunction(name = "dayofweek") public static Expression dayOfWeek(DateLiteral date) { return new TinyIntLiteral((byte) (date.getDayOfWeek() % 7 + 1)); } - @ExecFunction(name = "dayofweek", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "dayofweek") public static Expression dayOfWeek(DateTimeLiteral date) { return new TinyIntLiteral((byte) (date.getDayOfWeek() % 7 + 1)); } - @ExecFunction(name = "dayofweek", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "dayofweek") public static Expression dayOfWeek(DateV2Literal date) { return new TinyIntLiteral((byte) (date.getDayOfWeek() % 7 + 1)); } - @ExecFunction(name = "dayofweek", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "dayofweek") public static Expression dayOfWeek(DateTimeV2Literal date) { return new TinyIntLiteral((byte) (date.getDayOfWeek() % 7 + 1)); } @@ -291,26 +291,26 @@ private static LocalDateTime firstDayOfWeek(LocalDateTime dateTime) { /** * datetime arithmetic function date-format */ - @ExecFunction(name = "date_format", argTypes = {"DATE", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "date_format") public static Expression dateFormat(DateLiteral date, StringLikeLiteral format) { return new VarcharLiteral(DateUtils.formatBuilder(format.getValue()).toFormatter().format( java.time.LocalDate.of(((int) date.getYear()), ((int) date.getMonth()), ((int) date.getDay())))); } - @ExecFunction(name = "date_format", argTypes = {"DATETIME", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "date_format") public static Expression dateFormat(DateTimeLiteral date, StringLikeLiteral format) { return new VarcharLiteral(DateUtils.formatBuilder(format.getValue()).toFormatter().format( java.time.LocalDateTime.of(((int) date.getYear()), ((int) date.getMonth()), ((int) date.getDay()), ((int) date.getHour()), ((int) date.getMinute()), ((int) date.getSecond())))); } - @ExecFunction(name = "date_format", argTypes = {"DATEV2", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "date_format") public static Expression dateFormat(DateV2Literal date, StringLikeLiteral format) { return new 
VarcharLiteral(DateUtils.formatBuilder(format.getValue()).toFormatter().format( java.time.LocalDate.of(((int) date.getYear()), ((int) date.getMonth()), ((int) date.getDay())))); } - @ExecFunction(name = "date_format", argTypes = {"DATETIMEV2", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "date_format") public static Expression dateFormat(DateTimeV2Literal date, StringLikeLiteral format) { return new VarcharLiteral(DateUtils.formatBuilder(format.getValue()).toFormatter().format( java.time.LocalDateTime.of(((int) date.getYear()), ((int) date.getMonth()), ((int) date.getDay()), @@ -320,12 +320,12 @@ public static Expression dateFormat(DateTimeV2Literal date, StringLikeLiteral fo /** * datetime arithmetic function date */ - @ExecFunction(name = "date", argTypes = {"DATETIME"}, returnType = "DATE") + @ExecFunction(name = "date") public static Expression date(DateTimeLiteral dateTime) throws AnalysisException { return new DateLiteral(dateTime.getYear(), dateTime.getMonth(), dateTime.getDay()); } - @ExecFunction(name = "date", argTypes = {"DATETIMEV2"}, returnType = "DATEV2") + @ExecFunction(name = "date") public static Expression date(DateTimeV2Literal dateTime) throws AnalysisException { return new DateV2Literal(dateTime.getYear(), dateTime.getMonth(), dateTime.getDay()); } @@ -333,22 +333,22 @@ public static Expression date(DateTimeV2Literal dateTime) throws AnalysisExcepti /** * datetime arithmetic function date-trunc */ - @ExecFunction(name = "date_trunc", argTypes = {"DATETIME", "VARCHAR"}, returnType = "DATETIME") + @ExecFunction(name = "date_trunc") public static Expression dateTrunc(DateTimeLiteral date, StringLikeLiteral trunc) { return DateTimeLiteral.fromJavaDateType(dateTruncHelper(date.toJavaDateType(), trunc.getValue())); } - @ExecFunction(name = "date_trunc", argTypes = {"DATETIMEV2", "VARCHAR"}, returnType = "DATETIMEV2") + @ExecFunction(name = "date_trunc") public static Expression dateTrunc(DateTimeV2Literal date, StringLikeLiteral trunc) { return DateTimeV2Literal.fromJavaDateType(dateTruncHelper(date.toJavaDateType(), trunc.getValue())); } - @ExecFunction(name = "date_trunc", argTypes = {"DATE", "VARCHAR"}, returnType = "DATE") + @ExecFunction(name = "date_trunc") public static Expression dateTrunc(DateLiteral date, StringLikeLiteral trunc) { return DateLiteral.fromJavaDateType(dateTruncHelper(date.toJavaDateType(), trunc.getValue())); } - @ExecFunction(name = "date_trunc", argTypes = {"DATEV2", "VARCHAR"}, returnType = "DATEV2") + @ExecFunction(name = "date_trunc") public static Expression dateTrunc(DateV2Literal date, StringLikeLiteral trunc) { return DateV2Literal.fromJavaDateType(dateTruncHelper(date.toJavaDateType(), trunc.getValue())); } @@ -395,7 +395,7 @@ private static LocalDateTime dateTruncHelper(LocalDateTime dateTime, String trun /** * from_days. */ - @ExecFunction(name = "from_days", argTypes = {"INT"}, returnType = "DATEV2") + @ExecFunction(name = "from_days") public static Expression fromDays(IntegerLiteral n) { // doris treat 0000AD as ordinary year but java LocalDateTime treat it as lunar year. 
LocalDateTime res = LocalDateTime.of(0, 1, 1, 0, 0, 0) @@ -406,28 +406,28 @@ public static Expression fromDays(IntegerLiteral n) { return DateV2Literal.fromJavaDateType(res); } - @ExecFunction(name = "last_day", argTypes = {"DATE"}, returnType = "DATE") + @ExecFunction(name = "last_day") public static Expression lastDay(DateLiteral date) { LocalDateTime nextMonthFirstDay = LocalDateTime.of((int) date.getYear(), (int) date.getMonth(), 1, 0, 0, 0).plusMonths(1); return DateLiteral.fromJavaDateType(nextMonthFirstDay.minusDays(1)); } - @ExecFunction(name = "last_day", argTypes = {"DATETIME"}, returnType = "DATE") + @ExecFunction(name = "last_day") public static Expression lastDay(DateTimeLiteral date) { LocalDateTime nextMonthFirstDay = LocalDateTime.of((int) date.getYear(), (int) date.getMonth(), 1, 0, 0, 0).plusMonths(1); return DateLiteral.fromJavaDateType(nextMonthFirstDay.minusDays(1)); } - @ExecFunction(name = "last_day", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "last_day") public static Expression lastDay(DateV2Literal date) { LocalDateTime nextMonthFirstDay = LocalDateTime.of((int) date.getYear(), (int) date.getMonth(), 1, 0, 0, 0).plusMonths(1); return DateV2Literal.fromJavaDateType(nextMonthFirstDay.minusDays(1)); } - @ExecFunction(name = "last_day", argTypes = {"DATETIMEV2"}, returnType = "DATEV2") + @ExecFunction(name = "last_day") public static Expression lastDay(DateTimeV2Literal date) { LocalDateTime nextMonthFirstDay = LocalDateTime.of((int) date.getYear(), (int) date.getMonth(), 1, 0, 0, 0).plusMonths(1); @@ -437,22 +437,22 @@ public static Expression lastDay(DateTimeV2Literal date) { /** * datetime transformation function: to_monday */ - @ExecFunction(name = "to_monday", argTypes = {"DATE"}, returnType = "DATE") + @ExecFunction(name = "to_monday") public static Expression toMonday(DateLiteral date) { return DateLiteral.fromJavaDateType(toMonday(date.toJavaDateType())); } - @ExecFunction(name = "to_monday", argTypes = {"DATETIME"}, returnType = "DATE") + @ExecFunction(name = "to_monday") public static Expression toMonday(DateTimeLiteral date) { return DateLiteral.fromJavaDateType(toMonday(date.toJavaDateType())); } - @ExecFunction(name = "to_monday", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "to_monday") public static Expression toMonday(DateV2Literal date) { return DateV2Literal.fromJavaDateType(toMonday(date.toJavaDateType())); } - @ExecFunction(name = "to_monday", argTypes = {"DATETIMEV2"}, returnType = "DATEV2") + @ExecFunction(name = "to_monday") public static Expression toMonday(DateTimeV2Literal date) { return DateV2Literal.fromJavaDateType(toMonday(date.toJavaDateType())); } @@ -469,7 +469,7 @@ private static LocalDateTime toMonday(LocalDateTime dateTime) { /** * date transformation function: from_unixtime */ - @ExecFunction(name = "from_unixtime", argTypes = {"BIGINT"}, returnType = "VARCHAR") + @ExecFunction(name = "from_unixtime") public static Expression fromUnixTime(BigIntLiteral second) { return fromUnixTime(second, new VarcharLiteral("%Y-%m-%d %H:%i:%s")); } @@ -477,7 +477,7 @@ public static Expression fromUnixTime(BigIntLiteral second) { /** * date transformation function: from_unixtime */ - @ExecFunction(name = "from_unixtime", argTypes = {"BIGINT", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "from_unixtime") public static Expression fromUnixTime(BigIntLiteral second, StringLikeLiteral format) { // 32536771199L is max valid timestamp of mysql from_unix_time if (second.getValue() < 0 || 
second.getValue() > 32536771199L) { @@ -497,17 +497,17 @@ public static Expression fromUnixTime(BigIntLiteral second, StringLikeLiteral fo /** * date transformation function: unix_timestamp */ - @ExecFunction(name = "unix_timestamp", argTypes = {"DATE"}, returnType = "INT") + @ExecFunction(name = "unix_timestamp") public static Expression unixTimestamp(DateLiteral date) { return new IntegerLiteral(Integer.parseInt(getTimestamp(date.toJavaDateType()))); } - @ExecFunction(name = "unix_timestamp", argTypes = {"DATETIME"}, returnType = "INT") + @ExecFunction(name = "unix_timestamp") public static Expression unixTimestamp(DateTimeLiteral date) { return new IntegerLiteral(Integer.parseInt(getTimestamp(date.toJavaDateType()))); } - @ExecFunction(name = "unix_timestamp", argTypes = {"DATEV2"}, returnType = "INT") + @ExecFunction(name = "unix_timestamp") public static Expression unixTimestamp(DateV2Literal date) { return new IntegerLiteral(Integer.parseInt(getTimestamp(date.toJavaDateType()))); } @@ -515,7 +515,7 @@ public static Expression unixTimestamp(DateV2Literal date) { /** * date transformation function: unix_timestamp */ - @ExecFunction(name = "unix_timestamp", argTypes = {"DATETIMEV2"}, returnType = "DECIMALV3") + @ExecFunction(name = "unix_timestamp") public static Expression unixTimestamp(DateTimeV2Literal date) { if (date.getMicroSecond() == 0) { return new DecimalV3Literal(DecimalV3Type.createDecimalV3TypeLooseCheck(10, 0), @@ -529,7 +529,7 @@ public static Expression unixTimestamp(DateTimeV2Literal date) { /** * date transformation function: unix_timestamp */ - @ExecFunction(name = "unix_timestamp", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "DECIMALV3") + @ExecFunction(name = "unix_timestamp") public static Expression unixTimestamp(StringLikeLiteral date, StringLikeLiteral format) { DateTimeFormatter formatter = DateUtils.formatBuilder(format.getValue()).toFormatter(); LocalDateTime dateObj; @@ -564,12 +564,12 @@ private static String getTimestamp(LocalDateTime dateTime) { /** * date transformation function: to_date */ - @ExecFunction(name = "to_date", argTypes = {"DATETIME"}, returnType = "DATE") + @ExecFunction(name = "to_date") public static Expression toDate(DateTimeLiteral date) { return new DateLiteral(date.getYear(), date.getMonth(), date.getDay()); } - @ExecFunction(name = "to_date", argTypes = {"DATETIMEV2"}, returnType = "DATEV2") + @ExecFunction(name = "to_date") public static Expression toDate(DateTimeV2Literal date) { return new DateV2Literal(date.getYear(), date.getMonth(), date.getDay()); } @@ -577,25 +577,25 @@ public static Expression toDate(DateTimeV2Literal date) { /** * date transformation function: to_days */ - @ExecFunction(name = "to_days", argTypes = {"DATE"}, returnType = "INT") + @ExecFunction(name = "to_days") public static Expression toDays(DateLiteral date) { return new IntegerLiteral(((int) Duration.between( LocalDateTime.of(0, 1, 1, 0, 0, 0), date.toJavaDateType()).toDays())); } - @ExecFunction(name = "to_days", argTypes = {"DATETIME"}, returnType = "INT") + @ExecFunction(name = "to_days") public static Expression toDays(DateTimeLiteral date) { return new IntegerLiteral(((int) Duration.between( LocalDateTime.of(0, 1, 1, 0, 0, 0), date.toJavaDateType()).toDays())); } - @ExecFunction(name = "to_days", argTypes = {"DATEV2"}, returnType = "INT") + @ExecFunction(name = "to_days") public static Expression toDays(DateV2Literal date) { return new IntegerLiteral(((int) Duration.between( LocalDateTime.of(0, 1, 1, 0, 0, 0), date.toJavaDateType()).toDays())); 
} - @ExecFunction(name = "to_days", argTypes = {"DATETIMEV2"}, returnType = "INT") + @ExecFunction(name = "to_days") public static Expression toDays(DateTimeV2Literal date) { return new IntegerLiteral(((int) Duration.between( LocalDateTime.of(0, 1, 1, 0, 0, 0), date.toJavaDateType()).toDays())); @@ -604,7 +604,7 @@ public static Expression toDays(DateTimeV2Literal date) { /** * date transformation function: makedate */ - @ExecFunction(name = "makedate", argTypes = {"INT", "INT"}, returnType = "DATE") + @ExecFunction(name = "makedate") public static Expression makeDate(IntegerLiteral year, IntegerLiteral dayOfYear) { int day = dayOfYear.getValue(); return day > 0 ? DateLiteral.fromJavaDateType(LocalDateTime.of(year.getValue(), 1, 1, 0, 0, 0) @@ -614,7 +614,7 @@ public static Expression makeDate(IntegerLiteral year, IntegerLiteral dayOfYear) /** * date transformation function: str_to_date */ - @ExecFunction(name = "str_to_date", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "DATETIMEV2") + @ExecFunction(name = "str_to_date") public static Expression strToDate(StringLikeLiteral str, StringLikeLiteral format) { if (org.apache.doris.analysis.DateLiteral.hasTimePart(format.getStringValue())) { DataType returnType = DataType.fromCatalogType(ScalarType.getDefaultDateType(Type.DATETIME)); @@ -637,12 +637,12 @@ public static Expression strToDate(StringLikeLiteral str, StringLikeLiteral form } } - @ExecFunction(name = "timestamp", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "timestamp") public static Expression timestamp(DateTimeLiteral datetime) { return datetime; } - @ExecFunction(name = "timestamp", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "timestamp") public static Expression timestamp(DateTimeV2Literal datetime) { return datetime; } @@ -650,7 +650,7 @@ public static Expression timestamp(DateTimeV2Literal datetime) { /** * convert_tz */ - @ExecFunction(name = "convert_tz", argTypes = {"DATETIMEV2", "VARCHAR", "VARCHAR"}, returnType = "DATETIMEV2") + @ExecFunction(name = "convert_tz") public static Expression convertTz(DateTimeV2Literal datetime, StringLikeLiteral fromTz, StringLikeLiteral toTz) { DateTimeFormatter zoneFormatter = new DateTimeFormatterBuilder() .parseCaseInsensitive() @@ -665,52 +665,52 @@ public static Expression convertTz(DateTimeV2Literal datetime, StringLikeLiteral return DateTimeV2Literal.fromJavaDateType(resultDateTime.toLocalDateTime(), datetime.getDataType().getScale()); } - @ExecFunction(name = "weekday", argTypes = {"DATE"}, returnType = "TINYINT") + @ExecFunction(name = "weekday") public static Expression weekDay(DateLiteral date) { return new TinyIntLiteral((byte) ((date.toJavaDateType().getDayOfWeek().getValue() + 6) % 7)); } - @ExecFunction(name = "weekday", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "weekday") public static Expression weekDay(DateTimeLiteral date) { return new TinyIntLiteral((byte) ((date.toJavaDateType().getDayOfWeek().getValue() + 6) % 7)); } - @ExecFunction(name = "weekday", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "weekday") public static Expression weekDay(DateV2Literal date) { return new TinyIntLiteral((byte) ((date.toJavaDateType().getDayOfWeek().getValue() + 6) % 7)); } - @ExecFunction(name = "weekday", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "weekday") public static Expression weekDay(DateTimeV2Literal date) { return new TinyIntLiteral((byte) 
((date.toJavaDateType().getDayOfWeek().getValue() + 6) % 7)); } - @ExecFunction(name = "week", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "week") public static Expression week(DateTimeV2Literal dateTime) { return week(dateTime.toJavaDateType(), 0); } - @ExecFunction(name = "week", argTypes = {"DATETIMEV2", "INT"}, returnType = "TINYINT") + @ExecFunction(name = "week") public static Expression week(DateTimeV2Literal dateTime, IntegerLiteral mode) { return week(dateTime.toJavaDateType(), mode.getIntValue()); } - @ExecFunction(name = "week", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "week") public static Expression week(DateTimeLiteral dateTime) { return week(dateTime.toJavaDateType(), 0); } - @ExecFunction(name = "week", argTypes = {"DATETIME", "INT"}, returnType = "TINYINT") + @ExecFunction(name = "week") public static Expression week(DateTimeLiteral dateTime, IntegerLiteral mode) { return week(dateTime.toJavaDateType(), mode.getIntValue()); } - @ExecFunction(name = "week", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "week") public static Expression week(DateV2Literal date) { return week(date.toJavaDateType(), 0); } - @ExecFunction(name = "week", argTypes = {"DATEV2", "INT"}, returnType = "TINYINT") + @ExecFunction(name = "week") public static Expression week(DateV2Literal date, IntegerLiteral mode) { return week(date.toJavaDateType(), mode.getIntValue()); } @@ -775,32 +775,32 @@ private static boolean isSpecificDate(LocalDateTime localDateTime) { && (localDateTime.getDayOfMonth() == 1 || localDateTime.getDayOfMonth() == 2); } - @ExecFunction(name = "yearweek", argTypes = {"DATEV2", "INT"}, returnType = "INT") + @ExecFunction(name = "yearweek") public static Expression yearWeek(DateV2Literal date, IntegerLiteral mode) { return yearWeek(date.toJavaDateType(), mode.getIntValue()); } - @ExecFunction(name = "yearweek", argTypes = {"DATETIMEV2", "INT"}, returnType = "INT") + @ExecFunction(name = "yearweek") public static Expression yearWeek(DateTimeV2Literal dateTime, IntegerLiteral mode) { return yearWeek(dateTime.toJavaDateType(), mode.getIntValue()); } - @ExecFunction(name = "yearweek", argTypes = {"DATETIME", "INT"}, returnType = "INT") + @ExecFunction(name = "yearweek") public static Expression yearWeek(DateTimeLiteral dateTime, IntegerLiteral mode) { return yearWeek(dateTime.toJavaDateType(), mode.getIntValue()); } - @ExecFunction(name = "yearweek", argTypes = {"DATEV2"}, returnType = "INT") + @ExecFunction(name = "yearweek") public static Expression yearWeek(DateV2Literal date) { return yearWeek(date.toJavaDateType(), 0); } - @ExecFunction(name = "yearweek", argTypes = {"DATETIMEV2"}, returnType = "INT") + @ExecFunction(name = "yearweek") public static Expression yearWeek(DateTimeV2Literal dateTime) { return yearWeek(dateTime.toJavaDateType(), 0); } - @ExecFunction(name = "yearweek", argTypes = {"DATETIME"}, returnType = "INT") + @ExecFunction(name = "yearweek") public static Expression yearWeek(DateTimeLiteral dateTime) { return yearWeek(dateTime.toJavaDateType(), 0); } @@ -868,7 +868,7 @@ public static Expression yearWeek(LocalDateTime localDateTime, int mode) { /** * weekofyear */ - @ExecFunction(name = "weekofyear", argTypes = {"DATETIMEV2"}, returnType = "TINYINT") + @ExecFunction(name = "weekofyear") public static Expression weekOfYear(DateTimeV2Literal dateTime) { if (dateTime.getYear() == 0 && dateTime.getDayOfWeek() == 1) { if (dateTime.getMonth() == 1 && dateTime.getDay() == 2) { @@ -883,7 
+883,7 @@ public static Expression weekOfYear(DateTimeV2Literal dateTime) { /** * weekofyear */ - @ExecFunction(name = "weekofyear", argTypes = {"DATETIME"}, returnType = "TINYINT") + @ExecFunction(name = "weekofyear") public static Expression weekOfYear(DateTimeLiteral dateTime) { if (dateTime.getYear() == 0 && dateTime.getDayOfWeek() == 1) { if (dateTime.getMonth() == 1 && dateTime.getDay() == 2) { @@ -898,7 +898,7 @@ public static Expression weekOfYear(DateTimeLiteral dateTime) { /** * weekofyear */ - @ExecFunction(name = "weekofyear", argTypes = {"DATEV2"}, returnType = "TINYINT") + @ExecFunction(name = "weekofyear") public static Expression weekOfYear(DateV2Literal date) { if (date.getYear() == 0 && date.getDayOfWeek() == 1) { if (date.getMonth() == 1 && date.getDay() == 2) { @@ -909,53 +909,53 @@ public static Expression weekOfYear(DateV2Literal date) { return new TinyIntLiteral((byte) date.toJavaDateType().get(WeekFields.ISO.weekOfWeekBasedYear())); } - @ExecFunction(name = "dayname", argTypes = {"DATETIMEV2"}, returnType = "VARCHAR") + @ExecFunction(name = "dayname") public static Expression dayName(DateTimeV2Literal dateTime) { return new VarcharLiteral(dateTime.toJavaDateType().getDayOfWeek().getDisplayName(TextStyle.FULL, Locale.getDefault())); } - @ExecFunction(name = "dayname", argTypes = {"DATETIME"}, returnType = "VARCHAR") + @ExecFunction(name = "dayname") public static Expression dayName(DateTimeLiteral dateTime) { return new VarcharLiteral(dateTime.toJavaDateType().getDayOfWeek().getDisplayName(TextStyle.FULL, Locale.getDefault())); } - @ExecFunction(name = "dayname", argTypes = {"DATEV2"}, returnType = "VARCHAR") + @ExecFunction(name = "dayname") public static Expression dayName(DateV2Literal date) { return new VarcharLiteral(date.toJavaDateType().getDayOfWeek().getDisplayName(TextStyle.FULL, Locale.getDefault())); } - @ExecFunction(name = "monthname", argTypes = {"DATETIMEV2"}, returnType = "VARCHAR") + @ExecFunction(name = "monthname") public static Expression monthName(DateTimeV2Literal dateTime) { return new VarcharLiteral(dateTime.toJavaDateType().getMonth().getDisplayName(TextStyle.FULL, Locale.getDefault())); } - @ExecFunction(name = "monthname", argTypes = {"DATETIME"}, returnType = "VARCHAR") + @ExecFunction(name = "monthname") public static Expression monthName(DateTimeLiteral dateTime) { return new VarcharLiteral(dateTime.toJavaDateType().getMonth().getDisplayName(TextStyle.FULL, Locale.getDefault())); } - @ExecFunction(name = "monthname", argTypes = {"DATEV2"}, returnType = "VARCHAR") + @ExecFunction(name = "monthname") public static Expression monthName(DateV2Literal date) { return new VarcharLiteral(date.toJavaDateType().getMonth().getDisplayName(TextStyle.FULL, Locale.getDefault())); } - @ExecFunction(name = "from_second", argTypes = {"BIGINT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "from_second") public static Expression fromSecond(BigIntLiteral second) { return fromMicroSecond(second.getValue() * 1000 * 1000); } - @ExecFunction(name = "from_millisecond", argTypes = {"BIGINT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "from_millisecond") public static Expression fromMilliSecond(BigIntLiteral milliSecond) { return fromMicroSecond(milliSecond.getValue() * 1000); } - @ExecFunction(name = "from_microsecond", argTypes = {"BIGINT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "from_microsecond") public static Expression fromMicroSecond(BigIntLiteral microSecond) { return fromMicroSecond(microSecond.getValue()); } @@ -972,187 +972,187 
@@ private static Expression fromMicroSecond(long microSecond) { dateTime.getMinute(), dateTime.getSecond(), dateTime.getNano() / 1000); } - @ExecFunction(name = "microseconds_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "microseconds_diff") public static Expression microsecondsDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.MICROS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "milliseconds_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "milliseconds_diff") public static Expression millisecondsDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.MILLIS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "seconds_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "seconds_diff") public static Expression secondsDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.SECONDS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "seconds_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "seconds_diff") public static Expression secondsDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.SECONDS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "seconds_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "seconds_diff") public static Expression secondsDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.SECONDS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "seconds_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "seconds_diff") public static Expression secondsDiff(DateV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.SECONDS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "seconds_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "seconds_diff") public static Expression secondsDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.SECONDS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "minutes_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "minutes_diff") public static Expression minutesDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.MINUTES.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "minutes_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "minutes_diff") public static Expression minutesDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.MINUTES.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "minutes_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "minutes_diff") public static Expression minutesDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.MINUTES.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "minutes_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "minutes_diff") public static Expression minutesDiff(DateV2Literal t1, DateV2Literal t2) { 
return new BigIntLiteral(ChronoUnit.MINUTES.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "minutes_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "minutes_diff") public static Expression minutesDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.MINUTES.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "hours_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "hours_diff") public static Expression hoursDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.HOURS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "hours_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "hours_diff") public static Expression hoursDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.HOURS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "hours_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "hours_diff") public static Expression hoursDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.HOURS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "hours_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "hours_diff") public static Expression hoursDiff(DateV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.HOURS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "hours_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "hours_diff") public static Expression hoursDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.HOURS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "days_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "days_diff") public static Expression daysDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.DAYS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "days_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "days_diff") public static Expression daysDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.DAYS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "days_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "days_diff") public static Expression daysDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.DAYS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "days_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "days_diff") public static Expression daysDiff(DateV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.DAYS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "days_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "days_diff") public static Expression daysDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.DAYS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "weeks_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = 
"BIGINT") + @ExecFunction(name = "weeks_diff") public static Expression weeksDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.WEEKS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "weeks_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "weeks_diff") public static Expression weeksDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.WEEKS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "weeks_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "weeks_diff") public static Expression weeksDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.WEEKS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "weeks_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "weeks_diff") public static Expression weeksDiff(DateV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.WEEKS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "weeks_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "weeks_diff") public static Expression weeksDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.WEEKS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "months_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "months_diff") public static Expression monthsDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.MONTHS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "months_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "months_diff") public static Expression monthsDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.MONTHS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "months_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "months_diff") public static Expression monthsDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.MONTHS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "months_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "months_diff") public static Expression monthsDiff(DateV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.MONTHS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "months_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "months_diff") public static Expression monthsDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.MONTHS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "years_diff", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "years_diff") public static Expression yearsDiff(DateTimeV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.YEARS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "years_diff", argTypes = {"DATETIMEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "years_diff") public static Expression yearsDiff(DateTimeV2Literal t1, DateV2Literal t2) { return new 
BigIntLiteral(ChronoUnit.YEARS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "years_diff", argTypes = {"DATEV2", "DATETIMEV2"}, returnType = "BIGINT") + @ExecFunction(name = "years_diff") public static Expression yearsDiff(DateV2Literal t1, DateTimeV2Literal t2) { return new BigIntLiteral(ChronoUnit.YEARS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "years_diff", argTypes = {"DATEV2", "DATEV2"}, returnType = "BIGINT") + @ExecFunction(name = "years_diff") public static Expression yearsDiff(DateV2Literal t1, DateV2Literal t2) { return new BigIntLiteral(ChronoUnit.YEARS.between(t2.toJavaDateType(), t1.toJavaDateType())); } - @ExecFunction(name = "years_diff", argTypes = {"DATETIME", "DATETIME"}, returnType = "BIGINT") + @ExecFunction(name = "years_diff") public static Expression yearsDiff(DateTimeLiteral t1, DateTimeLiteral t2) { return new BigIntLiteral(ChronoUnit.YEARS.between(t2.toJavaDateType(), t1.toJavaDateType())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/ExecutableFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/ExecutableFunctions.java index 42ad228ad722d7..e3082b57c2f977 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/ExecutableFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/ExecutableFunctions.java @@ -28,9 +28,7 @@ import org.apache.doris.nereids.trees.expressions.literal.LargeIntLiteral; import org.apache.doris.nereids.trees.expressions.literal.NullLiteral; import org.apache.doris.nereids.trees.expressions.literal.SmallIntLiteral; -import org.apache.doris.nereids.trees.expressions.literal.StringLikeLiteral; import org.apache.doris.nereids.trees.expressions.literal.TinyIntLiteral; -import org.apache.doris.nereids.trees.expressions.literal.VarcharLiteral; import org.apache.doris.nereids.types.DoubleType; import java.math.BigInteger; @@ -47,47 +45,47 @@ public class ExecutableFunctions { /** * other scalar function */ - @ExecFunction(name = "abs", argTypes = {"TINYINT"}, returnType = "SMALLINT") + @ExecFunction(name = "abs") public static Expression abs(TinyIntLiteral literal) { return new SmallIntLiteral((short) Math.abs(literal.getValue())); } - @ExecFunction(name = "abs", argTypes = {"SMALLINT"}, returnType = "INT") + @ExecFunction(name = "abs") public static Expression abs(SmallIntLiteral literal) { return new IntegerLiteral(Math.abs(literal.getValue())); } - @ExecFunction(name = "abs", argTypes = {"INT"}, returnType = "BIGINT") + @ExecFunction(name = "abs") public static Expression abs(IntegerLiteral literal) { return new BigIntLiteral(Math.abs((long) literal.getValue())); } - @ExecFunction(name = "abs", argTypes = {"BIGINT"}, returnType = "LARGEINT") + @ExecFunction(name = "abs") public static Expression abs(BigIntLiteral literal) { return new LargeIntLiteral(BigInteger.valueOf(literal.getValue()).abs()); } - @ExecFunction(name = "abs", argTypes = {"LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "abs") public static Expression abs(LargeIntLiteral literal) { return new LargeIntLiteral(literal.getValue().abs()); } - @ExecFunction(name = "abs", argTypes = {"FLOAT"}, returnType = "FLOAT") + @ExecFunction(name = "abs") public static Expression abs(FloatLiteral literal) { return new FloatLiteral(Math.abs(literal.getValue())); } - @ExecFunction(name = "abs", argTypes = 
{"DOUBLE"}, returnType = "DOUBLE") + @ExecFunction(name = "abs") public static Expression abs(DoubleLiteral literal) { return new DoubleLiteral(Math.abs(literal.getValue())); } - @ExecFunction(name = "abs", argTypes = {"DECIMALV2"}, returnType = "DECIMALV2") + @ExecFunction(name = "abs") public static Expression abs(DecimalLiteral literal) { return new DecimalLiteral(literal.getValue().abs()); } - @ExecFunction(name = "abs", argTypes = {"DECIMALV3"}, returnType = "DECIMALV3") + @ExecFunction(name = "abs") public static Expression abs(DecimalV3Literal literal) { return new DecimalV3Literal(literal.getValue().abs()); } @@ -95,7 +93,7 @@ public static Expression abs(DecimalV3Literal literal) { /** * acos scalar function */ - @ExecFunction(name = "acos", argTypes = {"DOUBLE"}, returnType = "DOUBLE") + @ExecFunction(name = "acos") public static Expression acos(DoubleLiteral literal) { double result = Math.acos(literal.getValue()); if (Double.isNaN(result)) { @@ -105,21 +103,12 @@ public static Expression acos(DoubleLiteral literal) { } } - @ExecFunction(name = "append_trailing_char_if_absent", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") - public static Expression appendTrailingIfCharAbsent(StringLikeLiteral literal, StringLikeLiteral chr) { - if (literal.getValue().length() != 1) { - return null; - } - return literal.getValue().endsWith(chr.getValue()) ? literal - : new VarcharLiteral(literal.getValue() + chr.getValue()); - } - - @ExecFunction(name = "e", argTypes = {}, returnType = "DOUBLE") + @ExecFunction(name = "e") public static Expression e() { // CHECKSTYLE IGNORE THIS LINE return new DoubleLiteral(Math.E); } - @ExecFunction(name = "p1", argTypes = {}, returnType = "DOUBLE") + @ExecFunction(name = "pi") public static Expression pi() { return new DoubleLiteral(Math.PI); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/NumericArithmetic.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/NumericArithmetic.java index 9477de8ed1a890..ba0b75e75dd77e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/NumericArithmetic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/NumericArithmetic.java @@ -41,169 +41,169 @@ public class NumericArithmetic { /** * Executable arithmetic functions add */ - @ExecFunction(name = "add", argTypes = {"TINYINT", "TINYINT"}, returnType = "SMALLINT") + @ExecFunction(name = "add") public static Expression addTinyIntTinyInt(TinyIntLiteral first, TinyIntLiteral second) { short result = (short) Math.addExact(first.getValue(), second.getValue()); return new SmallIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"TINYINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "add") public static Expression addTinyIntSmallInt(TinyIntLiteral first, SmallIntLiteral second) { int result = Math.addExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "add", argTypes = {"TINYINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addTinyIntInt(TinyIntLiteral first, IntegerLiteral second) { long result = Math.addExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"TINYINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression 
addTinyIntBigInt(TinyIntLiteral first, BigIntLiteral second) { long result = Math.addExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"TINYINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addTinyIntLargeInt(TinyIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().add(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"SMALLINT", "TINYINT"}, returnType = "INT") + @ExecFunction(name = "add") public static Expression addSmallIntTinyInt(SmallIntLiteral first, TinyIntLiteral second) { int result = Math.addExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "add", argTypes = {"SMALLINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "add") public static Expression addSmallIntSmallInt(SmallIntLiteral first, SmallIntLiteral second) { int result = Math.addExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "add", argTypes = {"SMALLINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addSmallIntInt(SmallIntLiteral first, IntegerLiteral second) { long result = Math.addExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"SMALLINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addSmallIntBigInt(SmallIntLiteral first, BigIntLiteral second) { long result = Math.addExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"SMALLINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addSmallIntLargeInt(SmallIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().add(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"INT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addIntTinyInt(IntegerLiteral first, TinyIntLiteral second) { long result = Math.addExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"INT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addIntSmallInt(IntegerLiteral first, SmallIntLiteral second) { long result = Math.addExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"INT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addIntInt(IntegerLiteral first, IntegerLiteral second) { long result = Math.addExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"INT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addIntBigInt(IntegerLiteral first, BigIntLiteral second) { long result = Math.addExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"INT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addIntLargeInt(IntegerLiteral first, LargeIntLiteral 
second) { BigInteger result = second.getValue().add(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"BIGINT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addBigIntTinyInt(BigIntLiteral first, TinyIntLiteral second) { long result = Math.addExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"BIGINT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addBigIntSmallInt(BigIntLiteral first, SmallIntLiteral second) { long result = Math.addExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"BIGINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addBigIntInt(BigIntLiteral first, IntegerLiteral second) { long result = Math.addExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"BIGINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "add") public static Expression addBigIntBigInt(BigIntLiteral first, BigIntLiteral second) { long result = Math.addExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"BIGINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addBigIntLargeInt(BigIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().add(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"LARGEINT", "TINYINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addLargeIntTinyInt(LargeIntLiteral first, TinyIntLiteral second) { BigInteger result = first.getValue().add(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"LARGEINT", "SMALLINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addLargeIntSmallInt(LargeIntLiteral first, SmallIntLiteral second) { BigInteger result = first.getValue().add(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"LARGEINT", "INT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addLargeIntInt(LargeIntLiteral first, IntegerLiteral second) { BigInteger result = first.getValue().add(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"LARGEINT", "BIGINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addLargeIntBigInt(LargeIntLiteral first, BigIntLiteral second) { BigInteger result = first.getValue().add(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"LARGEINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "add") public static Expression addLargeIntLargeInt(LargeIntLiteral first, LargeIntLiteral second) { BigInteger result = first.getValue().add(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "add", argTypes = {"DOUBLE", "DOUBLE"}, returnType = "DOUBLE") + @ExecFunction(name = "add") public static Expression addDoubleDouble(DoubleLiteral 
first, DoubleLiteral second) { double result = first.getValue() + second.getValue(); return new DoubleLiteral(result); } - @ExecFunction(name = "add", argTypes = {"DECIMALV2", "DECIMALV2"}, returnType = "DECIMALV2") + @ExecFunction(name = "add") public static Expression addDecimalDecimal(DecimalLiteral first, DecimalLiteral second) { BigDecimal result = first.getValue().add(second.getValue()); return new DecimalLiteral(result); } - @ExecFunction(name = "add", argTypes = {"DECIMALV3", "DECIMALV3"}, returnType = "DECIMALV3") + @ExecFunction(name = "add") public static Expression addDecimalV3DecimalV3(DecimalV3Literal first, DecimalV3Literal second) { BigDecimal result = first.getValue().add(second.getValue()); return new DecimalV3Literal((DecimalV3Type) first.getDataType(), result); @@ -212,169 +212,169 @@ public static Expression addDecimalV3DecimalV3(DecimalV3Literal first, DecimalV3 /** * Executable arithmetic functions subtract */ - @ExecFunction(name = "subtract", argTypes = {"TINYINT", "TINYINT"}, returnType = "SMALLINT") + @ExecFunction(name = "subtract") public static Expression subtractTinyIntTinyInt(TinyIntLiteral first, TinyIntLiteral second) { short result = (short) Math.subtractExact(first.getValue(), second.getValue()); return new SmallIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"TINYINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "subtract") public static Expression subtractTinyIntSmallInt(TinyIntLiteral first, SmallIntLiteral second) { int result = Math.subtractExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"TINYINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractTinyIntInt(TinyIntLiteral first, IntegerLiteral second) { long result = Math.subtractExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"TINYINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractTinyIntBigInt(TinyIntLiteral first, BigIntLiteral second) { long result = Math.subtractExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"TINYINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "subtract") public static Expression subtractTinyIntLargeInt(TinyIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().subtract(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"SMALLINT", "TINYINT"}, returnType = "INT") + @ExecFunction(name = "subtract") public static Expression subtractSmallIntTinyInt(SmallIntLiteral first, TinyIntLiteral second) { int result = Math.subtractExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"SMALLINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "subtract") public static Expression subtractSmallIntSmallInt(SmallIntLiteral first, SmallIntLiteral second) { int result = Math.subtractExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"SMALLINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractSmallIntInt(SmallIntLiteral first, IntegerLiteral second) { long result = 
Math.subtractExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"SMALLINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractSmallIntBigInt(SmallIntLiteral first, BigIntLiteral second) { long result = Math.subtractExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"SMALLINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "subtract") public static Expression subtractSmallIntLargeInt(SmallIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().subtract(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"INT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractIntTinyInt(IntegerLiteral first, TinyIntLiteral second) { long result = Math.subtractExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"INT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractIntSmallInt(IntegerLiteral first, SmallIntLiteral second) { long result = Math.subtractExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"INT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractIntInt(IntegerLiteral first, IntegerLiteral second) { long result = Math.subtractExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"INT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractIntBigInt(IntegerLiteral first, BigIntLiteral second) { long result = Math.subtractExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"INT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "subtract") public static Expression subtractIntLargeInt(IntegerLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().subtract(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"BIGINT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractBigIntTinyInt(BigIntLiteral first, TinyIntLiteral second) { long result = Math.subtractExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"BIGINT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractBigIntSmallInt(BigIntLiteral first, SmallIntLiteral second) { long result = Math.subtractExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"BIGINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractBigIntInt(BigIntLiteral first, IntegerLiteral second) { long result = Math.subtractExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"BIGINT", "BIGINT"}, returnType = "BIGINT") + 
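// Illustrative sketch, not from the patch: the folding helpers in this hunk use
// Math.addExact / Math.subtractExact, which throw ArithmeticException instead of
// silently wrapping on overflow. A standalone example of that behaviour; the
// fallback comment inside the catch block is an assumption, not taken from Doris.
import java.math.BigInteger;

class ExactFoldSketch {
    public static void main(String[] args) {
        try {
            long sum = Math.addExact(Long.MAX_VALUE, 1L); // overflows long, throws
            System.out.println(sum);
        } catch (ArithmeticException e) {
            // assumption: on overflow the folding code would either leave the
            // expression unfolded or widen the computation, e.g. via BigInteger,
            // rather than fold in a wrapped-around value
            System.out.println(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE));
        }
    }
}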
@ExecFunction(name = "subtract") public static Expression subtractBigIntBigInt(BigIntLiteral first, BigIntLiteral second) { long result = Math.subtractExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"BIGINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "subtract") public static Expression subtractBigIntLargeInt(BigIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().subtract(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"LARGEINT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractLargeIntTinyInt(LargeIntLiteral first, TinyIntLiteral second) { BigInteger result = first.getValue().subtract(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"LARGEINT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractLargeIntSmallInt(LargeIntLiteral first, SmallIntLiteral second) { BigInteger result = first.getValue().subtract(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"LARGEINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractLargeIntInt(LargeIntLiteral first, IntegerLiteral second) { BigInteger result = first.getValue().subtract(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"LARGEINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "subtract") public static Expression subtractLargeIntBigInt(LargeIntLiteral first, BigIntLiteral second) { BigInteger result = first.getValue().subtract(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"LARGEINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "subtract") public static Expression subtractLargeIntLargeInt(LargeIntLiteral first, LargeIntLiteral second) { BigInteger result = first.getValue().subtract(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"DOUBLE", "DOUBLE"}, returnType = "DOUBLE") + @ExecFunction(name = "subtract") public static Expression subtractDoubleDouble(DoubleLiteral first, DoubleLiteral second) { double result = first.getValue() - second.getValue(); return new DoubleLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"DECIMALV2", "DECIMALV2"}, returnType = "DECIMALV2") + @ExecFunction(name = "subtract") public static Expression subtractDecimalDecimal(DecimalLiteral first, DecimalLiteral second) { BigDecimal result = first.getValue().subtract(second.getValue()); return new DecimalLiteral(result); } - @ExecFunction(name = "subtract", argTypes = {"DECIMALV3", "DECIMALV3"}, returnType = "DECIMALV3") + @ExecFunction(name = "subtract") public static Expression subtractDecimalV3DecimalV3(DecimalV3Literal first, DecimalV3Literal second) { BigDecimal result = first.getValue().subtract(second.getValue()); return new DecimalV3Literal((DecimalV3Type) first.getDataType(), result); @@ -383,163 +383,163 @@ public static Expression subtractDecimalV3DecimalV3(DecimalV3Literal first, Deci /** * Executable arithmetic functions multiply */ - 
@ExecFunction(name = "multiply", argTypes = {"TINYINT", "TINYINT"}, returnType = "SMALLINT") + @ExecFunction(name = "multiply") public static Expression multiplyTinyIntTinyInt(TinyIntLiteral first, TinyIntLiteral second) { short result = (short) Math.multiplyExact(first.getValue(), second.getValue()); return new SmallIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"TINYINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "multiply") public static Expression multiplyTinyIntSmallInt(TinyIntLiteral first, SmallIntLiteral second) { int result = Math.multiplyExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"TINYINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyTinyIntInt(TinyIntLiteral first, IntegerLiteral second) { long result = Math.multiplyExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"TINYINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyTinyIntBigInt(TinyIntLiteral first, BigIntLiteral second) { long result = Math.multiplyExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"TINYINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "multiply") public static Expression multiplyTinyIntLargeInt(TinyIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().multiply(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"SMALLINT", "TINYINT"}, returnType = "INT") + @ExecFunction(name = "multiply") public static Expression multiplySmallIntTinyInt(SmallIntLiteral first, TinyIntLiteral second) { int result = Math.multiplyExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"SMALLINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "multiply") public static Expression multiplySmallIntSmallInt(SmallIntLiteral first, SmallIntLiteral second) { int result = Math.multiplyExact(first.getValue(), second.getValue()); return new IntegerLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"SMALLINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplySmallIntInt(SmallIntLiteral first, IntegerLiteral second) { long result = Math.multiplyExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"SMALLINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplySmallIntBigInt(SmallIntLiteral first, BigIntLiteral second) { long result = Math.multiplyExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"SMALLINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "multiply") public static Expression multiplySmallIntLargeInt(SmallIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().multiply(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"INT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression 
multiplyIntTinyInt(IntegerLiteral first, TinyIntLiteral second) { long result = Math.multiplyExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"INT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyIntSmallInt(IntegerLiteral first, SmallIntLiteral second) { long result = Math.multiplyExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"INT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyIntInt(IntegerLiteral first, IntegerLiteral second) { long result = Math.multiplyExact((long) first.getValue(), (long) second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"INT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyIntBigInt(IntegerLiteral first, BigIntLiteral second) { long result = Math.multiplyExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"INT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "multiply") public static Expression multiplyIntLargeInt(IntegerLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().multiply(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"BIGINT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyBigIntTinyInt(BigIntLiteral first, TinyIntLiteral second) { long result = Math.multiplyExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"BIGINT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyBigIntSmallInt(BigIntLiteral first, SmallIntLiteral second) { long result = Math.multiplyExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"BIGINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyBigIntInt(BigIntLiteral first, IntegerLiteral second) { long result = Math.multiplyExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"BIGINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyBigIntBigInt(BigIntLiteral first, BigIntLiteral second) { long result = Math.multiplyExact(first.getValue(), second.getValue()); return new BigIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"BIGINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "multiply") public static Expression multiplyBigIntLargeInt(BigIntLiteral first, LargeIntLiteral second) { BigInteger result = second.getValue().multiply(new BigInteger(first.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"LARGEINT", "TINYINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyLargeIntTinyInt(LargeIntLiteral first, TinyIntLiteral second) { BigInteger result = first.getValue().multiply(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - 
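// Illustrative sketch, not from the patch: this hunk only strips argTypes/returnType
// from @ExecFunction, presumably because the argument signature can now be derived
// from the Java method itself. The names below (DORIS_TYPE_BY_LITERAL, inferArgTypes)
// are hypothetical and only show how argument types could be read off reflectively;
// how the return type is resolved is not visible in this hunk.
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class ExecSignatureSketch {
    // hypothetical literal-class -> Doris type-name table
    static final Map<String, String> DORIS_TYPE_BY_LITERAL = Map.of(
            "TinyIntLiteral", "TINYINT",
            "SmallIntLiteral", "SMALLINT",
            "IntegerLiteral", "INT",
            "BigIntLiteral", "BIGINT",
            "LargeIntLiteral", "LARGEINT",
            "DoubleLiteral", "DOUBLE");

    static List<String> inferArgTypes(Method m) {
        List<String> types = new ArrayList<>();
        for (Class<?> p : m.getParameterTypes()) {
            types.add(DORIS_TYPE_BY_LITERAL.getOrDefault(p.getSimpleName(), p.getSimpleName()));
        }
        return types;
    }
}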
@ExecFunction(name = "multiply", argTypes = {"LARGEINT", "SMALLINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyLargeIntSmallInt(LargeIntLiteral first, SmallIntLiteral second) { BigInteger result = first.getValue().multiply(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"LARGEINT", "INT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyLargeIntInt(LargeIntLiteral first, IntegerLiteral second) { BigInteger result = first.getValue().multiply(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"LARGEINT", "BIGINT"}, returnType = "BIGINT") + @ExecFunction(name = "multiply") public static Expression multiplyLargeIntBigInt(LargeIntLiteral first, BigIntLiteral second) { BigInteger result = first.getValue().multiply(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"LARGEINT", "LARGEINT"}, returnType = "LARGEINT") + @ExecFunction(name = "multiply") public static Expression multiplyLargeIntLargeInt(LargeIntLiteral first, LargeIntLiteral second) { BigInteger result = first.getValue().multiply(new BigInteger(second.getValue().toString())); return new LargeIntLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"DOUBLE", "DOUBLE"}, returnType = "DOUBLE") + @ExecFunction(name = "multiply") public static Expression multiplyDoubleDouble(DoubleLiteral first, DoubleLiteral second) { double result = first.getValue() * second.getValue(); return new DoubleLiteral(result); } - @ExecFunction(name = "multiply", argTypes = {"DECIMALV2", "DECIMALV2"}, returnType = "DECIMALV2") + @ExecFunction(name = "multiply") public static Expression multiplyDecimalDecimal(DecimalLiteral first, DecimalLiteral second) { BigDecimal result = first.getValue().multiply(second.getValue()); return new DecimalLiteral(result); @@ -548,7 +548,7 @@ public static Expression multiplyDecimalDecimal(DecimalLiteral first, DecimalLit /** * decimalV3 multiply in FE */ - @ExecFunction(name = "multiply", argTypes = {"DECIMALV3", "DECIMALV3"}, returnType = "DECIMALV3") + @ExecFunction(name = "multiply") public static Expression multiplyDecimalV3DecimalV3(DecimalV3Literal first, DecimalV3Literal second) { BigDecimal result = first.getValue().multiply(second.getValue()); DecimalV3Type t1 = (DecimalV3Type) first.getDataType(); @@ -561,7 +561,7 @@ public static Expression multiplyDecimalV3DecimalV3(DecimalV3Literal first, Deci /** * Executable arithmetic functions divide */ - @ExecFunction(name = "divide", argTypes = {"DOUBLE", "DOUBLE"}, returnType = "DOUBLE") + @ExecFunction(name = "divide") public static Expression divideDouble(DoubleLiteral first, DoubleLiteral second) { if (second.getValue() == 0.0) { return new NullLiteral(first.getDataType()); @@ -573,7 +573,7 @@ public static Expression divideDouble(DoubleLiteral first, DoubleLiteral second) /** * Executable arithmetic functions divide */ - @ExecFunction(name = "divide", argTypes = {"DECIMALV2", "DECIMALV2"}, returnType = "DECIMALV2") + @ExecFunction(name = "divide") public static Expression divideDecimal(DecimalLiteral first, DecimalLiteral second) { if (first.getValue().compareTo(BigDecimal.ZERO) == 0) { return new NullLiteral(first.getDataType()); @@ -585,7 +585,7 @@ public static Expression divideDecimal(DecimalLiteral first, DecimalLiteral seco 
/** * decimalv3 divide in FE */ - @ExecFunction(name = "divide", argTypes = {"DECIMALV3", "DECIMALV3"}, returnType = "DECIMALV3") + @ExecFunction(name = "divide") public static Expression divideDecimalV3(DecimalV3Literal first, DecimalV3Literal second) { if (second.getValue().compareTo(BigDecimal.ZERO) == 0) { return new NullLiteral(first.getDataType()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/StringArithmetic.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/StringArithmetic.java index 6f2ff11ad9a139..844c0e8a3e9502 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/StringArithmetic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/StringArithmetic.java @@ -44,6 +44,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URLDecoder; +import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -67,7 +68,7 @@ private static Expression castStringLikeLiteral(StringLikeLiteral first, String /** * Executable arithmetic functions concat */ - @ExecFunction(name = "concat", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "concat") public static Expression concatVarcharVarchar(StringLikeLiteral first, StringLikeLiteral second) { String result = first.getValue() + second.getValue(); return castStringLikeLiteral(first, result); @@ -102,7 +103,7 @@ private static String substringImpl(String first, int second, int third) { /** * Executable arithmetic functions substring */ - @ExecFunction(name = "substring", argTypes = {"VARCHAR", "INT", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "substring") public static Expression substringVarcharIntInt(StringLikeLiteral first, IntegerLiteral second, IntegerLiteral third) { return castStringLikeLiteral(first, substringImpl(first.getValue(), second.getValue(), third.getValue())); @@ -111,7 +112,7 @@ public static Expression substringVarcharIntInt(StringLikeLiteral first, /** * Executable arithmetic functions length */ - @ExecFunction(name = "length", argTypes = {"VARCHAR"}, returnType = "INT") + @ExecFunction(name = "length") public static Expression lengthVarchar(StringLikeLiteral first) { return new IntegerLiteral(first.getValue().length()); } @@ -119,7 +120,7 @@ public static Expression lengthVarchar(StringLikeLiteral first) { /** * Executable arithmetic functions Lower */ - @ExecFunction(name = "lower", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "lower") public static Expression lowerVarchar(StringLikeLiteral first) { return castStringLikeLiteral(first, first.getValue().toLowerCase()); } @@ -127,7 +128,7 @@ public static Expression lowerVarchar(StringLikeLiteral first) { /** * Executable arithmetic functions Upper */ - @ExecFunction(name = "upper", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "upper") public static Expression upperVarchar(StringLikeLiteral first) { return castStringLikeLiteral(first, first.getValue().toUpperCase()); } @@ -153,7 +154,7 @@ private static String trimImpl(String first, String second, boolean left, boolea /** * Executable arithmetic functions Trim */ - @ExecFunction(name = "trim", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "trim") public static Expression trimVarchar(StringLikeLiteral 
first) { return castStringLikeLiteral(first, trimImpl(first.getValue(), " ", true, true)); } @@ -161,7 +162,7 @@ public static Expression trimVarchar(StringLikeLiteral first) { /** * Executable arithmetic functions Trim */ - @ExecFunction(name = "trim", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "trim") public static Expression trimVarcharVarchar(StringLikeLiteral first, StringLikeLiteral second) { return castStringLikeLiteral(first, trimImpl(first.getValue(), second.getValue(), true, true)); } @@ -169,7 +170,7 @@ public static Expression trimVarcharVarchar(StringLikeLiteral first, StringLikeL /** * Executable arithmetic functions ltrim */ - @ExecFunction(name = "ltrim", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "ltrim") public static Expression ltrimVarchar(StringLikeLiteral first) { return castStringLikeLiteral(first, trimImpl(first.getValue(), " ", true, false)); } @@ -177,7 +178,7 @@ public static Expression ltrimVarchar(StringLikeLiteral first) { /** * Executable arithmetic functions ltrim */ - @ExecFunction(name = "ltrim", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "ltrim") public static Expression ltrimVarcharVarchar(StringLikeLiteral first, StringLikeLiteral second) { return castStringLikeLiteral(first, trimImpl(first.getValue(), second.getValue(), true, false)); } @@ -185,7 +186,7 @@ public static Expression ltrimVarcharVarchar(StringLikeLiteral first, StringLike /** * Executable arithmetic functions rtrim */ - @ExecFunction(name = "rtrim", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "rtrim") public static Expression rtrimVarchar(StringLikeLiteral first) { return castStringLikeLiteral(first, trimImpl(first.getValue(), " ", false, true)); } @@ -193,7 +194,7 @@ public static Expression rtrimVarchar(StringLikeLiteral first) { /** * Executable arithmetic functions rtrim */ - @ExecFunction(name = "rtrim", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "rtrim") public static Expression rtrimVarcharVarchar(StringLikeLiteral first, StringLikeLiteral second) { return castStringLikeLiteral(first, trimImpl(first.getValue(), second.getValue(), false, true)); } @@ -201,7 +202,7 @@ public static Expression rtrimVarcharVarchar(StringLikeLiteral first, StringLike /** * Executable arithmetic functions Replace */ - @ExecFunction(name = "replace", argTypes = {"VARCHAR", "VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "replace") public static Expression replace(StringLikeLiteral first, StringLikeLiteral second, StringLikeLiteral third) { if (second.getValue().length() == 0) { return castStringLikeLiteral(first, first.getValue()); @@ -212,7 +213,7 @@ public static Expression replace(StringLikeLiteral first, StringLikeLiteral seco /** * Executable arithmetic functions Left */ - @ExecFunction(name = "left", argTypes = {"VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "left") public static Expression left(StringLikeLiteral first, IntegerLiteral second) { if (second.getValue() <= 0) { return castStringLikeLiteral(first, ""); @@ -226,7 +227,7 @@ public static Expression left(StringLikeLiteral first, IntegerLiteral second) { /** * Executable arithmetic functions Right */ - @ExecFunction(name = "right", argTypes = {"VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "right") public static Expression right(StringLikeLiteral first, IntegerLiteral second) { if (second.getValue() < (- 
first.getValue().length()) || Math.abs(second.getValue()) == 0) { return castStringLikeLiteral(first, ""); @@ -246,7 +247,7 @@ public static Expression right(StringLikeLiteral first, IntegerLiteral second) { /** * Executable arithmetic functions Locate */ - @ExecFunction(name = "locate", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "INT") + @ExecFunction(name = "locate") public static Expression locate(StringLikeLiteral first, StringLikeLiteral second) { return new IntegerLiteral(second.getValue().trim().indexOf(first.getValue()) + 1); } @@ -254,7 +255,7 @@ public static Expression locate(StringLikeLiteral first, StringLikeLiteral secon /** * Executable arithmetic functions Instr */ - @ExecFunction(name = "instr", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "INT") + @ExecFunction(name = "instr") public static Expression instr(StringLikeLiteral first, StringLikeLiteral second) { return new IntegerLiteral(first.getValue().indexOf(second.getValue()) + 1); } @@ -262,7 +263,7 @@ public static Expression instr(StringLikeLiteral first, StringLikeLiteral second /** * Executable arithmetic functions Ascii */ - @ExecFunction(name = "ascii", argTypes = {"VARCHAR"}, returnType = "INT") + @ExecFunction(name = "ascii") public static Expression ascii(StringLikeLiteral first) { if (first.getValue().length() == 0) { return new IntegerLiteral(0); @@ -274,7 +275,7 @@ public static Expression ascii(StringLikeLiteral first) { /** * Executable arithmetic functions Bin */ - @ExecFunction(name = "bin", argTypes = {"BIGINT"}, returnType = "VARCHAR") + @ExecFunction(name = "bin") public static Expression bin(BigIntLiteral first) { return new VarcharLiteral(Long.toBinaryString(first.getValue())); } @@ -282,7 +283,7 @@ public static Expression bin(BigIntLiteral first) { /** * Executable arithmetic functions ConcatWs */ - @ExecFunction(name = "concat_ws", argTypes = {"VARCHAR", "ARRAY"}, returnType = "VARCHAR") + @ExecFunction(name = "concat_ws") public static Expression concatWsVarcharArray(StringLikeLiteral first, ArrayLiteral second) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < second.getValue().size() - 1; i++) { @@ -298,7 +299,7 @@ public static Expression concatWsVarcharArray(StringLikeLiteral first, ArrayLite /** * Executable arithmetic functions ConcatWs */ - @ExecFunction(varArgs = true, name = "concat_ws", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "concat_ws") public static Expression concatWsVarcharVarchar(StringLikeLiteral first, VarcharLiteral... 
second) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < second.length - 1; i++) { @@ -312,7 +313,7 @@ public static Expression concatWsVarcharVarchar(StringLikeLiteral first, Varchar /** * Executable arithmetic functions CharacterLength */ - @ExecFunction(name = "character_length", argTypes = {"VARCHAR"}, returnType = "INT") + @ExecFunction(name = "character_length") public static Expression characterLength(StringLikeLiteral first) { return new IntegerLiteral(first.getValue().length()); } @@ -328,7 +329,7 @@ private static boolean isSeparator(char c) { /** * Executable arithmetic functions initCap */ - @ExecFunction(name = "initcap", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "initcap") public static Expression initCap(StringLikeLiteral first) { StringBuilder result = new StringBuilder(first.getValue().length()); boolean capitalizeNext = true; @@ -350,7 +351,7 @@ public static Expression initCap(StringLikeLiteral first) { /** * Executable arithmetic functions md5 */ - @ExecFunction(name = "md5", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "md5") public static Expression md5(StringLikeLiteral first) { try { MessageDigest md = MessageDigest.getInstance("MD5"); @@ -365,7 +366,7 @@ public static Expression md5(StringLikeLiteral first) { /** * Executable arithmetic functions md5 */ - @ExecFunction(varArgs = true, name = "md5sum", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "md5sum") public static Expression md5Sum(VarcharLiteral... first) { try { // Step 1: Create a MessageDigest instance for MD5 @@ -419,7 +420,7 @@ private static int compareLiteral(Literal first, Literal... second) { /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"INT", "INT"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldInt(IntegerLiteral first, IntegerLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -427,7 +428,7 @@ public static Expression fieldInt(IntegerLiteral first, IntegerLiteral... second /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"TINYINT", "TINYINT"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldTinyInt(TinyIntLiteral first, TinyIntLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -435,7 +436,7 @@ public static Expression fieldTinyInt(TinyIntLiteral first, TinyIntLiteral... se /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"SMALLINT", "SMALLINT"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldSmallInt(SmallIntLiteral first, SmallIntLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -443,7 +444,7 @@ public static Expression fieldSmallInt(SmallIntLiteral first, SmallIntLiteral... /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"BIGINT", "BIGINT"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldBigInt(BigIntLiteral first, BigIntLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -451,7 +452,7 @@ public static Expression fieldBigInt(BigIntLiteral first, BigIntLiteral... 
secon /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"LARGEINT", "LARGEINT"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldLargeInt(LargeIntLiteral first, LargeIntLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -459,7 +460,7 @@ public static Expression fieldLargeInt(LargeIntLiteral first, LargeIntLiteral... /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"FLOAT", "FLOAT"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldFloat(FloatLiteral first, FloatLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -467,7 +468,7 @@ public static Expression fieldFloat(FloatLiteral first, FloatLiteral... second) /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"DOUBLE", "DOUBLE"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldDouble(DoubleLiteral first, DoubleLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -475,7 +476,7 @@ public static Expression fieldDouble(DoubleLiteral first, DoubleLiteral... secon /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"DECIMAL", "DECIMAL"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldDecimalV2(DecimalLiteral first, DecimalLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -483,7 +484,7 @@ public static Expression fieldDecimalV2(DecimalLiteral first, DecimalLiteral... /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"DECIMALV3", "DECIMALV3"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldDecimalV3(DecimalV3Literal first, DecimalV3Literal... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -491,7 +492,7 @@ public static Expression fieldDecimalV3(DecimalV3Literal first, DecimalV3Literal /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"DATETIME", "DATETIME"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldDateTime(DateTimeLiteral first, DateTimeLiteral... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -499,7 +500,7 @@ public static Expression fieldDateTime(DateTimeLiteral first, DateTimeLiteral... /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldDateTimeV2(DateTimeV2Literal first, DateTimeV2Literal... second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -507,7 +508,7 @@ public static Expression fieldDateTimeV2(DateTimeV2Literal first, DateTimeV2Lite /** * Executable arithmetic functions field */ - @ExecFunction(varArgs = true, name = "field", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "INT") + @ExecFunction(name = "field") public static Expression fieldVarchar(StringLikeLiteral first, VarcharLiteral... 
second) { return new IntegerLiteral(compareLiteral(first, second)); } @@ -525,7 +526,7 @@ private static int findStringInSet(String target, String input) { /** * Executable arithmetic functions find_in_set */ - @ExecFunction(name = "find_in_set", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "INT") + @ExecFunction(name = "find_in_set") public static Expression findInSetVarchar(StringLikeLiteral first, StringLikeLiteral second) { return new IntegerLiteral(findStringInSet(first.getValue(), second.getValue())); } @@ -533,7 +534,7 @@ public static Expression findInSetVarchar(StringLikeLiteral first, StringLikeLit /** * Executable arithmetic functions repeat */ - @ExecFunction(name = "repeat", argTypes = {"VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "repeat") public static Expression repeat(StringLikeLiteral first, IntegerLiteral second) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < second.getValue(); i++) { @@ -545,7 +546,7 @@ public static Expression repeat(StringLikeLiteral first, IntegerLiteral second) /** * Executable arithmetic functions reverse */ - @ExecFunction(name = "reverse", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "reverse") public static Expression reverseVarchar(StringLikeLiteral first) { StringBuilder sb = new StringBuilder(); sb.append(first.getValue()); @@ -555,7 +556,7 @@ public static Expression reverseVarchar(StringLikeLiteral first) { /** * Executable arithmetic functions space */ - @ExecFunction(name = "space", argTypes = {"INT"}, returnType = "VARCHAR") + @ExecFunction(name = "space") public static Expression space(IntegerLiteral first) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < first.getValue(); i++) { @@ -567,7 +568,7 @@ public static Expression space(IntegerLiteral first) { /** * Executable arithmetic functions split_by_char */ - @ExecFunction(name = "split_by_char", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "ARRAY") + @ExecFunction(name = "split_by_char") public static Expression splitByChar(StringLikeLiteral first, StringLikeLiteral second) { String[] result = first.getValue().split(second.getValue()); List items = new ArrayList<>(); @@ -580,7 +581,7 @@ public static Expression splitByChar(StringLikeLiteral first, StringLikeLiteral /** * Executable arithmetic functions split_part */ - @ExecFunction(name = "split_part", argTypes = {"VARCHAR", "VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "split_part") public static Expression splitPart(StringLikeLiteral first, StringLikeLiteral chr, IntegerLiteral number) { if (first.getValue().equals(chr.getValue())) { if (Math.abs(number.getValue()) == 1 || Math.abs(number.getValue()) == 2) { @@ -611,7 +612,7 @@ public static Expression splitPart(StringLikeLiteral first, StringLikeLiteral ch return castStringLikeLiteral(first, ""); } } - return new NullLiteral(); + return new NullLiteral(first.getDataType()); } else if (number.getValue() < 0) { StringBuilder result = new StringBuilder(parts[Math.abs(number.getValue()) - 1]); return castStringLikeLiteral(first, result.reverse().toString()); @@ -623,7 +624,7 @@ public static Expression splitPart(StringLikeLiteral first, StringLikeLiteral ch /** * Executable arithmetic functions substring_index */ - @ExecFunction(name = "substring_index", argTypes = {"VARCHAR", "VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "substring_index") public static Expression substringIndex(StringLikeLiteral first, StringLikeLiteral chr, IntegerLiteral number) { String[] parts = 
first.getValue().split(chr.getValue()); if (Math.abs(number.getValue()) >= parts.length) { @@ -652,7 +653,7 @@ public static Expression substringIndex(StringLikeLiteral first, StringLikeLiter /** * Executable arithmetic functions strcmp */ - @ExecFunction(name = "strcmp", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "TINYINT") + @ExecFunction(name = "strcmp") public static Expression strcmp(StringLikeLiteral first, StringLikeLiteral second) { int result = first.getValue().compareTo(second.getValue()); if (result == 0) { @@ -667,7 +668,7 @@ public static Expression strcmp(StringLikeLiteral first, StringLikeLiteral secon /** * Executable arithmetic functions strLeft */ - @ExecFunction(name = "strleft", argTypes = {"VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "strleft") public static Expression strLeft(StringLikeLiteral first, IntegerLiteral second) { if (second.getValue() <= 0) { return castStringLikeLiteral(first, ""); @@ -681,7 +682,7 @@ public static Expression strLeft(StringLikeLiteral first, IntegerLiteral second) /** * Executable arithmetic functions strRight */ - @ExecFunction(name = "strright", argTypes = {"VARCHAR", "INT"}, returnType = "VARCHAR") + @ExecFunction(name = "strright") public static Expression strRight(StringLikeLiteral first, IntegerLiteral second) { if (second.getValue() < (- first.getValue().length()) || Math.abs(second.getValue()) == 0) { return castStringLikeLiteral(first, ""); @@ -701,7 +702,7 @@ public static Expression strRight(StringLikeLiteral first, IntegerLiteral second /** * Executable arithmetic functions overlay */ - @ExecFunction(name = "overlay", argTypes = {"VARCHAR", "INT", "INT", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "overlay") public static Expression overlay(StringLikeLiteral first, IntegerLiteral second, IntegerLiteral third, StringLikeLiteral four) { StringBuilder sb = new StringBuilder(); @@ -725,7 +726,7 @@ public static Expression overlay(StringLikeLiteral first, /** * Executable arithmetic functions parseurl */ - @ExecFunction(name = "parse_url", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "parse_url") public static Expression parseurl(StringLikeLiteral first, StringLikeLiteral second) { URI uri = null; try { @@ -780,7 +781,7 @@ public static Expression parseurl(StringLikeLiteral first, StringLikeLiteral sec /** * Executable arithmetic functions urldecode */ - @ExecFunction(name = "url_decode", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "url_decode") public static Expression urlDecode(StringLikeLiteral first) { try { return castStringLikeLiteral(first, URLDecoder.decode(first.getValue(), StandardCharsets.UTF_8.name())); @@ -789,11 +790,26 @@ public static Expression urlDecode(StringLikeLiteral first) { } } + /** + * Executable arithmetic functions urlencode + */ + @ExecFunction(name = "url_encode") + public static Expression urlEncode(StringLikeLiteral first) { + try { + return castStringLikeLiteral(first, URLEncoder.encode(first.getValue(), StandardCharsets.UTF_8.name())); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + /** * Executable arithmetic functions append_trailing_char_if_absent */ - @ExecFunction(name = "append_trailing_char_if_absent", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "append_trailing_char_if_absent") public static Expression appendTrailingCharIfAbsent(StringLikeLiteral first, StringLikeLiteral second) { + if 
(second.getValue().length() != 1) { + return new NullLiteral(first.getDataType()); + } if (first.getValue().endsWith(second.getValue())) { return first; } else { @@ -804,7 +820,7 @@ public static Expression appendTrailingCharIfAbsent(StringLikeLiteral first, Str /** * Executable arithmetic functions endsWith */ - @ExecFunction(name = "ends_with", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "BOOLEAN") + @ExecFunction(name = "ends_with") public static Expression endsWith(StringLikeLiteral first, StringLikeLiteral second) { if (first.getValue().endsWith(second.getValue())) { return BooleanLiteral.TRUE; @@ -816,7 +832,7 @@ public static Expression endsWith(StringLikeLiteral first, StringLikeLiteral sec /** * Executable arithmetic functions extractUrlParameter */ - @ExecFunction(name = "extract_url_parameter", argTypes = {"VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "extract_url_parameter") public static Expression extractUrlParameter(StringLikeLiteral first, StringLikeLiteral second) { if (first.getValue() == null || first.getValue().indexOf('?') == -1) { return castStringLikeLiteral(first, ""); @@ -840,7 +856,7 @@ public static Expression extractUrlParameter(StringLikeLiteral first, StringLike /** * Executable arithmetic functions quote */ - @ExecFunction(name = "quote", argTypes = {"VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "quote") public static Expression quote(StringLikeLiteral first) { return castStringLikeLiteral(first, "\'" + first.getValue() + "\'"); } @@ -848,7 +864,7 @@ public static Expression quote(StringLikeLiteral first) { /** * Executable arithmetic functions replaceEmpty */ - @ExecFunction(name = "replace_empty", argTypes = {"VARCHAR", "VARCHAR", "VARCHAR"}, returnType = "VARCHAR") + @ExecFunction(name = "replace_empty") public static Expression replaceEmpty(StringLikeLiteral first, StringLikeLiteral second, StringLikeLiteral third) { return castStringLikeLiteral(first, first.getValue().replace(second.getValue(), third.getValue())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/TimeRoundSeries.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/TimeRoundSeries.java index 3a98ee6252791a..41254428322b67 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/TimeRoundSeries.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/executable/TimeRoundSeries.java @@ -137,73 +137,73 @@ private static LocalDateTime getDateCeilOrFloor(DATE tag, LocalDateTime date, in /** * datetime arithmetic function year-ceil */ - @ExecFunction(name = "year_ceil", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeLiteral date, DateTimeLiteral 
origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "year_ceil", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_ceil") public static Expression yearCeil(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); @@ -212,73 +212,73 @@ public static Expression yearCeil(DateTimeV2Literal date, IntegerLiteral period, /** * datetime arithmetic function month-ceil */ - @ExecFunction(name = "month_ceil", argTypes = {"DATETIME"}, returnType = 
"DATETIME") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_ceil") public static 
Expression monthCeil(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "month_ceil", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_ceil") public static Expression monthCeil(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); @@ -287,73 +287,73 @@ public static Expression monthCeil(DateTimeV2Literal date, IntegerLiteral period /** * datetime arithmetic function day-ceil */ - @ExecFunction(name = "day_ceil", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIMEV2"}, 
returnType = "DATETIMEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "day_ceil", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_ceil") public static Expression dayCeil(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); @@ -362,73 +362,73 @@ public static Expression dayCeil(DateTimeV2Literal date, IntegerLiteral period, /** * datetime arithmetic function hour-ceil */ - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - 
@ExecFunction(name = "hour_ceil", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "hour_ceil", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_ceil") public static Expression hourCeil(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); @@ -437,73 +437,73 @@ public static Expression hourCeil(DateTimeV2Literal date, IntegerLiteral period, /** * datetime arithmetic function minute-ceil */ - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeLiteral date, IntegerLiteral 
period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "minute_ceil", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_ceil") public static Expression minuteCeil(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), @@ -513,73 +513,73 @@ public static Expression minuteCeil(DateTimeV2Literal date, IntegerLiteral perio /** * datetime arithmetic function SECOND-ceil */ - @ExecFunction(name = "second_ceil", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + 
@ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, origin.toJavaDateType(), true)); } - @ExecFunction(name = "second_ceil", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, 
returnType = "DATETIMEV2") + @ExecFunction(name = "second_ceil") public static Expression secondCeil(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), @@ -589,73 +589,73 @@ public static Expression secondCeil(DateTimeV2Literal date, IntegerLiteral perio /** * datetime arithmetic function year-floor */ - @ExecFunction(name = "year_floor", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = 
"year_floor", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "year_floor", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "year_floor") public static Expression yearFloor(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.YEAR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); @@ -664,73 +664,73 @@ public static Expression yearFloor(DateTimeV2Literal date, IntegerLiteral period /** * datetime arithmetic function month-floor */ - @ExecFunction(name = "month_floor", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateV2Literal date, DateV2Literal 
origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "month_floor", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "month_floor") public static Expression monthFloor(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MONTH, date.toJavaDateType(), @@ -740,73 +740,73 @@ public static Expression monthFloor(DateTimeV2Literal date, IntegerLiteral perio /** * datetime arithmetic function day-floor */ - @ExecFunction(name = "day_floor", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATEV2"}, 
returnType = "DATEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "day_floor", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "day_floor") public static Expression dayFloor(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.DAY, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); @@ -815,73 +815,73 @@ public static Expression dayFloor(DateTimeV2Literal date, IntegerLiteral period, /** * datetime arithmetic function hour-floor */ - @ExecFunction(name = "hour_floor", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - 
@ExecFunction(name = "hour_floor", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "hour_floor", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "hour_floor") public static Expression hourFloor(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.HOUR, date.toJavaDateType(), period.getValue(), 
origin.toJavaDateType(), false)); @@ -890,73 +890,73 @@ public static Expression hourFloor(DateTimeV2Literal date, IntegerLiteral period /** * datetime arithmetic function minute-floor */ - @ExecFunction(name = "minute_floor", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_floor") public static Expression 
minuteFloor(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "minute_floor", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "minute_floor") public static Expression minuteFloor(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.MINUTE, date.toJavaDateType(), @@ -966,73 +966,73 @@ public static Expression minuteFloor(DateTimeV2Literal date, IntegerLiteral peri /** * datetime arithmetic function SECOND-floor */ - @ExecFunction(name = "second_floor", argTypes = {"DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeLiteral date) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIME", "INT"}, returnType = "DATETIME") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeLiteral date, IntegerLiteral period) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIME", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeLiteral date, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIME", "INT", "DATETIME"}, returnType = "DATETIME") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeLiteral date, IntegerLiteral period, DateTimeLiteral origin) { return DateTimeLiteral.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateV2Literal date) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATEV2", "INT"}, returnType = "DATEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateV2Literal date, IntegerLiteral period) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATEV2", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateV2Literal date, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } 
- @ExecFunction(name = "second_floor", argTypes = {"DATEV2", "INT", "DATEV2"}, returnType = "DATEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateV2Literal date, IntegerLiteral period, DateV2Literal origin) { return DateV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), origin.toJavaDateType(), false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeV2Literal date) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIMEV2", "INT"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeV2Literal date, IntegerLiteral period) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), period.getValue(), START_ORIGINAL_DAY, false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIMEV2", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeV2Literal date, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), 1, origin.toJavaDateType(), false)); } - @ExecFunction(name = "second_floor", argTypes = {"DATETIMEV2", "INT", "DATETIMEV2"}, returnType = "DATETIMEV2") + @ExecFunction(name = "second_floor") public static Expression secondFloor(DateTimeV2Literal date, IntegerLiteral period, DateTimeV2Literal origin) { return DateTimeV2Literal.fromJavaDateType(getDateCeilOrFloor(DATE.SECOND, date.toJavaDateType(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ArrayApply.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ArrayApply.java index 07e4c16d776653..86138b82cac170 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ArrayApply.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ArrayApply.java @@ -73,7 +73,7 @@ public void checkLegalityBeforeTypeCoercion() { DataType argType = ((ArrayType) child(0).getDataType()).getItemType(); if (!(argType.isIntegralType() || argType.isFloatLikeType() || argType.isDecimalLikeType() || argType.isDateLikeType() || argType.isBooleanType())) { - throw new AnalysisException("array_apply does not support type: " + toSql()); + throw new AnalysisException("array_apply does not support type " + argType + ", expression is " + toSql()); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/AssertTrue.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/AssertTrue.java index a4bffd9903add2..b45b3426a2519a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/AssertTrue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/AssertTrue.java @@ -22,6 +22,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import 
org.apache.doris.nereids.trees.expressions.functions.NoneMovableFunction; import org.apache.doris.nereids.trees.expressions.shape.BinaryExpression; import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; import org.apache.doris.nereids.types.BooleanType; @@ -36,7 +37,7 @@ * ScalarFunction 'assert_true'. */ public class AssertTrue extends ScalarFunction - implements BinaryExpression, ExplicitlyCastableSignature, AlwaysNotNullable { + implements BinaryExpression, ExplicitlyCastableSignature, AlwaysNotNullable, NoneMovableFunction { public static final List SIGNATURES = ImmutableList.of( FunctionSignature.ret(BooleanType.INSTANCE).args(BooleanType.INSTANCE, VarcharType.SYSTEM_DEFAULT)); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/FromIso8601Date.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/FromIso8601Date.java new file mode 100644 index 00000000000000..e0e3e41548a479 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/FromIso8601Date.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.AlwaysNullable; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.DateV2Type; +import org.apache.doris.nereids.types.StringType; +import org.apache.doris.nereids.types.VarcharType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'from_iso8601_date'. This class is generated by GenerateFunction. + */ +public class FromIso8601Date extends ScalarFunction + implements UnaryExpression, ExplicitlyCastableSignature, AlwaysNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(DateV2Type.INSTANCE).args(VarcharType.SYSTEM_DEFAULT), + FunctionSignature.ret(DateV2Type.INSTANCE).args(StringType.INSTANCE) + ); + + /** + * constructor with 1 argument. + */ + public FromIso8601Date(Expression arg0) { + super("from_iso8601_date", arg0); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + /** + * withChildren. 
+ */ + @Override + public FromIso8601Date withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new FromIso8601Date(children.get(0)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitFromIso8601Date(this, context); + } + +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSearch.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSearch.java new file mode 100644 index 00000000000000..6f034308cf78ce --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/JsonSearch.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.AlwaysNullable; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.JsonType; +import org.apache.doris.nereids.types.VarcharType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * JsonSearch returns the json path pointing to a json string which contains the search string. 
+ */ +public class JsonSearch extends ScalarFunction implements ExplicitlyCastableSignature, AlwaysNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(JsonType.INSTANCE) + .args(VarcharType.SYSTEM_DEFAULT, VarcharType.SYSTEM_DEFAULT, VarcharType.SYSTEM_DEFAULT) + ); + + public JsonSearch(Expression arg0, Expression arg1, Expression arg2) { + super("json_search", arg0, arg1, arg2); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + @Override + public JsonSearch withChildren(List children) { + Preconditions.checkArgument(children.size() == 3); + return new JsonSearch(children.get(0), children.get(1), children.get(2)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitJsonSearch(this, context); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/LastQueryId.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/LastQueryId.java new file mode 100644 index 00000000000000..61ef773e2ca75b --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/LastQueryId.java @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.functions.AlwaysNotNullable; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.shape.LeafExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.VarcharType; + +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'last_query_id'. 
+ */ +public class LastQueryId extends ScalarFunction + implements LeafExpression, ExplicitlyCastableSignature, AlwaysNotNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(VarcharType.SYSTEM_DEFAULT).args() + ); + + public LastQueryId() { + super("last_query_id", ImmutableList.of()); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitLastQueryId(this, context); + } + + @Override + public boolean isDeterministic() { + return false; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/NormalCdf.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/NormalCdf.java new file mode 100644 index 00000000000000..c68e610679c52d --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/NormalCdf.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.AlwaysNullable; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.shape.TernaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.DoubleType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'normal_cdf'. This class is generated by GenerateFunction. + */ +public class NormalCdf extends ScalarFunction + implements TernaryExpression, ExplicitlyCastableSignature, AlwaysNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(DoubleType.INSTANCE) + .args(DoubleType.INSTANCE, DoubleType.INSTANCE, DoubleType.INSTANCE) + ); + + /** + * constructor with 3 arguments. + */ + public NormalCdf(Expression arg0, Expression arg1, Expression arg2) { + super("normal_cdf", arg0, arg1, arg2); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + /** + * withChildren. 
+ */ + @Override + public NormalCdf withChildren(List children) { + Preconditions.checkArgument(children.size() == 3); + return new NormalCdf(children.get(0), children.get(1), children.get(2)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitNormalCdf(this, context); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ToIso8601.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ToIso8601.java new file mode 100644 index 00000000000000..d676157977d284 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/ToIso8601.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.PropagateNullable; +import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.DateTimeType; +import org.apache.doris.nereids.types.DateTimeV2Type; +import org.apache.doris.nereids.types.DateType; +import org.apache.doris.nereids.types.DateV2Type; +import org.apache.doris.nereids.types.StringType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'to_iso8601'. This class is generated by GenerateFunction. + */ +public class ToIso8601 extends ScalarFunction + implements UnaryExpression, ExplicitlyCastableSignature, PropagateNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(StringType.INSTANCE).args(DateTimeV2Type.SYSTEM_DEFAULT), + FunctionSignature.ret(StringType.INSTANCE).args(DateV2Type.INSTANCE), + FunctionSignature.ret(StringType.INSTANCE).args(DateTimeType.INSTANCE), + FunctionSignature.ret(StringType.INSTANCE).args(DateType.INSTANCE) + ); + + /** + * constructor with 1 argument. + */ + public ToIso8601(Expression arg0) { + super("to_iso8601", arg0); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + /** + * withChildren. 
+ */ + @Override + public ToIso8601 withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new ToIso8601(children.get(0)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitToIso8601(this, context); + } + +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Translate.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Translate.java new file mode 100644 index 00000000000000..2edb474a71f56a --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Translate.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.PropagateNullable; +import org.apache.doris.nereids.trees.expressions.shape.TernaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.StringType; +import org.apache.doris.nereids.types.VarcharType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'translate'. This class is generated by GenerateFunction. + */ +public class Translate extends ScalarFunction + implements TernaryExpression, ExplicitlyCastableSignature, PropagateNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(VarcharType.SYSTEM_DEFAULT) + .args(VarcharType.SYSTEM_DEFAULT, VarcharType.SYSTEM_DEFAULT, VarcharType.SYSTEM_DEFAULT), + FunctionSignature.ret(StringType.INSTANCE) + .args(StringType.INSTANCE, StringType.INSTANCE, StringType.INSTANCE) + ); + + /** + * constructor with 3 arguments. + */ + public Translate(Expression arg0, Expression arg1, Expression arg2) { + super("translate", arg0, arg1, arg2); + } + + /** + * withChildren. 
+ */ + @Override + public Translate withChildren(List children) { + Preconditions.checkArgument(children.size() == 3); + return new Translate(children.get(0), children.get(1), children.get(2)); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitTranslate(this, context); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/UrlEncode.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/UrlEncode.java new file mode 100644 index 00000000000000..fd32e953cf9527 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/UrlEncode.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.PropagateNullable; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.StringType; +import org.apache.doris.nereids.types.VarcharType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'url_encode'. This class is generated by GenerateFunction. + */ +public class UrlEncode extends ScalarFunction + implements ExplicitlyCastableSignature, PropagateNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(VarcharType.SYSTEM_DEFAULT).args(VarcharType.SYSTEM_DEFAULT), + FunctionSignature.ret(StringType.INSTANCE).args(StringType.INSTANCE) + ); + + /** + * constructor with 1 argument. + */ + public UrlEncode(Expression arg0) { + super("url_encode", arg0); + } + + + /** + * withChildren. 
+ */ + @Override + public UrlEncode withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new UrlEncode(children.get(0)); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitUrlEncode(this, context); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/table/Numbers.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/table/Numbers.java index 845baa045cc041..26027d1049912f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/table/Numbers.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/table/Numbers.java @@ -70,7 +70,7 @@ public Statistics computeStats(List slots) { Map columnToStatistics = Maps.newHashMap(); ColumnStatisticBuilder statBuilder = new ColumnStatisticBuilder() - .setCount(rowNum).setAvgSizeByte(8).setNumNulls(0).setDataSize(8); + .setAvgSizeByte(8).setNumNulls(0).setDataSize(8); if (numberTvf.getUseConst()) { // a column of const value long value = numberTvf.getConstValue(); statBuilder = statBuilder.setNdv(1).setMinValue(value).setMaxValue(value) diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java index a8deb97ef4efb6..f011fd4fd54fb0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java @@ -191,6 +191,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.Fpow; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromBase64; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromDays; +import org.apache.doris.nereids.trees.expressions.functions.scalar.FromIso8601Date; import org.apache.doris.nereids.trees.expressions.functions.scalar.FromUnixtime; import org.apache.doris.nereids.trees.expressions.functions.scalar.G; import org.apache.doris.nereids.trees.expressions.functions.scalar.GetJsonBigInt; @@ -241,6 +242,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonObject; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonQuote; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonReplace; +import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonSearch; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonSet; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonUnQuote; import org.apache.doris.nereids.trees.expressions.functions.scalar.JsonbExistsPath; @@ -268,6 +270,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.L1Distance; import org.apache.doris.nereids.trees.expressions.functions.scalar.L2Distance; import org.apache.doris.nereids.trees.expressions.functions.scalar.LastDay; +import org.apache.doris.nereids.trees.expressions.functions.scalar.LastQueryId; import org.apache.doris.nereids.trees.expressions.functions.scalar.Least; import org.apache.doris.nereids.trees.expressions.functions.scalar.Left; import org.apache.doris.nereids.trees.expressions.functions.scalar.Length; @@ -318,6 +321,7 @@ import 
org.apache.doris.nereids.trees.expressions.functions.scalar.MurmurHash364; import org.apache.doris.nereids.trees.expressions.functions.scalar.Negative; import org.apache.doris.nereids.trees.expressions.functions.scalar.NgramSearch; +import org.apache.doris.nereids.trees.expressions.functions.scalar.NormalCdf; import org.apache.doris.nereids.trees.expressions.functions.scalar.NotNullOrEmpty; import org.apache.doris.nereids.trees.expressions.functions.scalar.Now; import org.apache.doris.nereids.trees.expressions.functions.scalar.NullIf; @@ -425,15 +429,18 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIpv6; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIpv6OrDefault; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIpv6OrNull; +import org.apache.doris.nereids.trees.expressions.functions.scalar.ToIso8601; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToMonday; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToQuantileState; import org.apache.doris.nereids.trees.expressions.functions.scalar.Tokenize; +import org.apache.doris.nereids.trees.expressions.functions.scalar.Translate; import org.apache.doris.nereids.trees.expressions.functions.scalar.Trim; import org.apache.doris.nereids.trees.expressions.functions.scalar.Truncate; import org.apache.doris.nereids.trees.expressions.functions.scalar.Unhex; import org.apache.doris.nereids.trees.expressions.functions.scalar.UnixTimestamp; import org.apache.doris.nereids.trees.expressions.functions.scalar.Upper; import org.apache.doris.nereids.trees.expressions.functions.scalar.UrlDecode; +import org.apache.doris.nereids.trees.expressions.functions.scalar.UrlEncode; import org.apache.doris.nereids.trees.expressions.functions.scalar.User; import org.apache.doris.nereids.trees.expressions.functions.scalar.UtcTimestamp; import org.apache.doris.nereids.trees.expressions.functions.scalar.Uuid; @@ -1187,6 +1194,10 @@ default R visitFromDays(FromDays fromDays, C context) { return visitScalarFunction(fromDays, context); } + default R visitFromIso8601Date(FromIso8601Date fromIso8601Date, C context) { + return visitScalarFunction(fromIso8601Date, context); + } + default R visitFromUnixtime(FromUnixtime fromUnixtime, C context) { return visitScalarFunction(fromUnixtime, context); } @@ -1355,6 +1366,10 @@ default R visitJsonKeys(JsonKeys jsonKeys, C context) { return visitScalarFunction(jsonKeys, context); } + default R visitJsonSearch(JsonSearch jsonSearch, C context) { + return visitScalarFunction(jsonSearch, context); + } + default R visitJsonInsert(JsonInsert jsonInsert, C context) { return visitScalarFunction(jsonInsert, context); } @@ -1635,6 +1650,10 @@ default R visitNgramSearch(NgramSearch ngramSearch, C context) { return visitScalarFunction(ngramSearch, context); } + default R visitNormalCdf(NormalCdf normalCdf, C context) { + return visitScalarFunction(normalCdf, context); + } + default R visitNotNullOrEmpty(NotNullOrEmpty notNullOrEmpty, C context) { return visitScalarFunction(notNullOrEmpty, context); } @@ -1667,6 +1686,10 @@ default R visitUrlDecode(UrlDecode urlDecode, C context) { return visitScalarFunction(urlDecode, context); } + default R visitUrlEncode(UrlEncode urlEncode, C context) { + return visitScalarFunction(urlEncode, context); + } + default R visitRandomBytes(RandomBytes randomBytes, C context) { return visitScalarFunction(randomBytes, context); } @@ -2055,6 +2078,10 @@ default R visitToIpv6OrNull(ToIpv6OrNull toIpv6OrNull, C 
context) { return visitScalarFunction(toIpv6OrNull, context); } + default R visitToIso8601(ToIso8601 toIso8601, C context) { + return visitScalarFunction(toIso8601, context); + } + default R visitToMonday(ToMonday toMonday, C context) { return visitScalarFunction(toMonday, context); } @@ -2067,6 +2094,10 @@ default R visitToQuantileState(ToQuantileState toQuantileState, C context) { return visitScalarFunction(toQuantileState, context); } + default R visitTranslate(Translate translate, C context) { + return visitScalarFunction(translate, context); + } + default R visitTrim(Trim trim, C context) { return visitScalarFunction(trim, context); } @@ -2242,4 +2273,8 @@ default R visitStructElement(StructElement structElement, C context) { default R visitMultiMatch(MultiMatch multiMatch, C context) { return visitScalarFunction(multiMatch, context); } + + default R visitLastQueryId(LastQueryId queryId, C context) { + return visitScalarFunction(queryId, context); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/ComputeResultSet.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/ComputeResultSet.java index beee784ec9de91..f86e143ca7b2ae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/ComputeResultSet.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/ComputeResultSet.java @@ -19,8 +19,10 @@ import org.apache.doris.nereids.CascadesContext; import org.apache.doris.nereids.SqlCacheContext; +import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.qe.ResultSet; +import java.util.List; import java.util.Optional; /** @@ -51,5 +53,6 @@ * */ public interface ComputeResultSet { - Optional computeResultInFe(CascadesContext cascadesContext, Optional sqlCacheContext); + Optional computeResultInFe(CascadesContext cascadesContext, Optional sqlCacheContext, + List outputSlots); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/Plan.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/Plan.java index b4f4bd4d3bb191..60af0d18666f2b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/Plan.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/Plan.java @@ -73,8 +73,8 @@ default boolean hasUnboundExpression() { default boolean containsSlots(ImmutableSet slots) { return getExpressions().stream().anyMatch( - expression -> !Sets.intersection(slots, expression.getInputSlots()).isEmpty() - || children().stream().anyMatch(plan -> plan.containsSlots(slots))); + expression -> !Sets.intersection(slots, expression.getInputSlots()).isEmpty()) + || children().stream().anyMatch(plan -> plan.containsSlots(slots)); } default LogicalProperties computeLogicalProperties() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java index 73d4cb36448eb4..a5d15f1d515608 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/Project.java @@ -22,11 +22,13 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; import org.apache.doris.nereids.trees.expressions.Slot; +import org.apache.doris.nereids.trees.expressions.functions.NoneMovableFunction; import org.apache.doris.nereids.util.ExpressionUtils; import 
org.apache.doris.nereids.util.PlanUtils; import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; @@ -61,7 +63,15 @@ default Map getAliasToProducer() { * @return project list for merged project */ default List mergeProjections(Project childProject) { - return PlanUtils.mergeProjections(childProject.getProjects(), getProjects()); + List projects = new ArrayList<>(); + projects.addAll(PlanUtils.mergeProjections(childProject.getProjects(), getProjects())); + for (NamedExpression expression : childProject.getProjects()) { + // keep NoneMovableFunction for later use + if (expression.containsType(NoneMovableFunction.class)) { + projects.add(expression); + } + } + return projects; } /** @@ -97,4 +107,14 @@ default boolean isAllSlots() { } return true; } + + /** containsNoneMovableFunction */ + default boolean containsNoneMovableFunction() { + for (NamedExpression expression : getProjects()) { + if (expression.containsType(NoneMovableFunction.class)) { + return true; + } + } + return false; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterMTMVCommand.java index 56f1ffec182444..24cb47846da9ad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterMTMVCommand.java @@ -32,7 +32,7 @@ /** * alter multi table materialized view */ -public class AlterMTMVCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class AlterMTMVCommand extends Command implements ForwardWithSync { public static final Logger LOG = LogManager.getLogger(AlterMTMVCommand.class); private final AlterMTMVInfo alterMTMVInfo; diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CancelMTMVTaskCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CancelMTMVTaskCommand.java index c8b2a19067df89..ba1078c33d9539 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CancelMTMVTaskCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CancelMTMVTaskCommand.java @@ -30,7 +30,7 @@ /** * cancel mtmv task */ -public class CancelMTMVTaskCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class CancelMTMVTaskCommand extends Command implements ForwardWithSync { private final CancelMTMVTaskInfo cancelMTMVTaskInfo; public CancelMTMVTaskCommand(CancelMTMVTaskInfo cancelMTMVTaskInfo) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateMTMVCommand.java index 0bc3c335fd12f4..0eac139d5f9ae5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateMTMVCommand.java @@ -33,7 +33,7 @@ /** * create multi table materialized view */ -public class CreateMTMVCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class CreateMTMVCommand extends Command implements ForwardWithSync { public static final Logger LOG = LogManager.getLogger(CreateMTMVCommand.class); private final CreateMTMVInfo createMTMVInfo; diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/DropMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/DropMTMVCommand.java index 909b87ef9aa3f8..f5b986a3718823 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/DropMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/DropMTMVCommand.java @@ -30,7 +30,7 @@ /** * refresh mtmv */ -public class DropMTMVCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class DropMTMVCommand extends Command implements ForwardWithSync { private final DropMTMVInfo dropMTMVInfo; public DropMTMVCommand(DropMTMVInfo dropMTMVInfo) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/PauseMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/PauseMTMVCommand.java index fadb3bc0c3270a..7e130a9ab47cd7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/PauseMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/PauseMTMVCommand.java @@ -30,7 +30,7 @@ /** * pause mtmv */ -public class PauseMTMVCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class PauseMTMVCommand extends Command implements ForwardWithSync { private final PauseMTMVInfo pauseMTMVInfo; public PauseMTMVCommand(PauseMTMVInfo pauseMTMVInfo) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/RefreshMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/RefreshMTMVCommand.java index 2ed3e446d7bec0..c8ea6f19bbdb31 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/RefreshMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/RefreshMTMVCommand.java @@ -30,7 +30,7 @@ /** * refresh mtmv */ -public class RefreshMTMVCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class RefreshMTMVCommand extends Command implements ForwardWithSync { private final RefreshMTMVInfo refreshMTMVInfo; public RefreshMTMVCommand(RefreshMTMVInfo refreshMTMVInfo) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ResumeMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ResumeMTMVCommand.java index 5fc933886c2f8e..7c53636f5555ac 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ResumeMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ResumeMTMVCommand.java @@ -30,7 +30,7 @@ /** * resume mtmv */ -public class ResumeMTMVCommand extends Command implements ForwardWithSync, NotAllowFallback { +public class ResumeMTMVCommand extends Command implements ForwardWithSync { private final ResumeMTMVInfo resumeMTMVInfo; public ResumeMTMVCommand(ResumeMTMVInfo resumeMTMVInfo) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateMTMVCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateMTMVCommand.java index 7da1df6af6f30a..19ecac345b36a5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateMTMVCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateMTMVCommand.java @@ -29,7 +29,7 @@ /** * resume mtmv */ -public class ShowCreateMTMVCommand 
extends Command implements NoForward, NotAllowFallback { +public class ShowCreateMTMVCommand extends Command implements NoForward { private final ShowCreateMTMVInfo showCreateMTMVInfo; public ShowCreateMTMVCommand(ShowCreateMTMVInfo showCreateMTMVInfo) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java index 0b2694cc311b4c..184b6d2fa555f6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/ColumnDefinition.java @@ -318,13 +318,15 @@ public void validate(boolean isOlap, Set keysSet, Set clusterKey } defaultValue = Optional.of(DefaultValue.HLL_EMPTY_DEFAULT_VALUE); } else if (type.isBitmapType()) { - if (defaultValue.isPresent() && defaultValue.get() != DefaultValue.NULL_DEFAULT_VALUE) { - throw new AnalysisException("Bitmap type column can not set default value"); + if (defaultValue.isPresent() && isOlap && defaultValue.get() != DefaultValue.NULL_DEFAULT_VALUE + && !defaultValue.get().getValue().equals(DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE.getValue())) { + throw new AnalysisException("Bitmap type column default value only support " + + DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE); } defaultValue = Optional.of(DefaultValue.BITMAP_EMPTY_DEFAULT_VALUE); } else if (type.isArrayType() && defaultValue.isPresent() && isOlap && defaultValue.get() != DefaultValue.NULL_DEFAULT_VALUE && !defaultValue.get() - .getValue().equals(DefaultValue.ARRAY_EMPTY_DEFAULT_VALUE.getValue())) { + .getValue().equals(DefaultValue.ARRAY_EMPTY_DEFAULT_VALUE.getValue())) { throw new AnalysisException("Array type column default value only support null or " + DefaultValue.ARRAY_EMPTY_DEFAULT_VALUE); } else if (type.isMapType()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateMTMVInfo.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateMTMVInfo.java index 0bee8dd1881e64..b553dccdd8d97e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateMTMVInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateMTMVInfo.java @@ -34,8 +34,10 @@ import org.apache.doris.catalog.Type; import org.apache.doris.catalog.View; import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; import org.apache.doris.common.FeConstants; import org.apache.doris.common.FeNameFormat; +import org.apache.doris.common.UserException; import org.apache.doris.common.util.DynamicPartitionUtil; import org.apache.doris.common.util.PropertyAnalyzer; import org.apache.doris.datasource.InternalCatalog; @@ -172,8 +174,7 @@ public void analyze(ConnectContext ctx) throws Exception { final boolean finalEnableMergeOnWrite = false; Set keysSet = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); keysSet.addAll(keys); - columns.forEach(c -> c.validate(true, keysSet, Sets.newHashSet(), finalEnableMergeOnWrite, KeysType.DUP_KEYS)); - + validateColumns(this.columns, keysSet, finalEnableMergeOnWrite); if (distribution == null) { throw new AnalysisException("Create async materialized view should contain distribution desc"); } @@ -195,6 +196,18 @@ public void analyze(ConnectContext ctx) throws Exception { rewriteQuerySql(ctx); } + /**validate column name*/ + public void validateColumns(List 
columns, Set keysSet, + boolean finalEnableMergeOnWrite) throws UserException { + Set colSets = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER); + for (ColumnDefinition col : columns) { + if (!colSets.add(col.getName())) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_DUP_FIELDNAME, col.getName()); + } + col.validate(true, keysSet, Sets.newHashSet(), finalEnableMergeOnWrite, KeysType.DUP_KEYS); + } + } + private void rewriteQuerySql(ConnectContext ctx) { analyzeAndFillRewriteSqlMap(querySql, ctx); querySql = BaseViewInfo.rewriteSql(ctx.getStatementContext().getIndexInSqlToString(), querySql); @@ -381,7 +394,9 @@ private void analyzeExpressions(Plan plan, Map mvProperties) { List functionCollectResult = MaterializedViewUtils.extractNondeterministicFunction(plan); if (!CollectionUtils.isEmpty(functionCollectResult)) { throw new AnalysisException(String.format( - "can not contain invalid expression, the expression is %s", + "can not contain nonDeterministic expression, the expression is %s. " + + "Should add 'enable_nondeterministic_function' = 'true' property " + + "when create materialized view if you know the property real meaning entirely", functionCollectResult.stream().map(Expression::toString).collect(Collectors.joining(",")))); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateTableInfo.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateTableInfo.java index 730c6f115a3da6..9022e9deb7dfc9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateTableInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateTableInfo.java @@ -131,7 +131,6 @@ public class CreateTableInfo { private boolean isExternal = false; private String clusterName = null; private List clusterKeysColumnNames = null; - private List clusterKeysColumnIds = null; private PartitionTableInfo partitionTableInfo; // get when validate /** @@ -424,9 +423,6 @@ public void validate(ConnectContext ctx) { validateKeyColumns(); if (!clusterKeysColumnNames.isEmpty()) { - if (Config.isCloudMode()) { - throw new AnalysisException("Cluster key is not supported in cloud mode"); - } if (!isEnableMergeOnWrite) { throw new AnalysisException( "Cluster keys only support unique keys table which enabled " @@ -736,50 +732,6 @@ private void validateKeyColumns() { "The number of key columns should be less than the number of columns."); } - if (!clusterKeysColumnNames.isEmpty()) { - if (Config.isCloudMode()) { - throw new AnalysisException("Cluster key is not supported in cloud mode"); - } - if (keysType != KeysType.UNIQUE_KEYS) { - throw new AnalysisException("Cluster keys only support unique keys table."); - } - clusterKeysColumnIds = Lists.newArrayList(); - for (int i = 0; i < clusterKeysColumnNames.size(); ++i) { - String name = clusterKeysColumnNames.get(i); - // check if key is duplicate - for (int j = 0; j < i; j++) { - if (clusterKeysColumnNames.get(j).equalsIgnoreCase(name)) { - throw new AnalysisException("Duplicate cluster key column[" + name + "]."); - } - } - // check if key exists and generate key column ids - for (int j = 0; j < columns.size(); j++) { - if (columns.get(j).getName().equalsIgnoreCase(name)) { - columns.get(j).setClusterKeyId(clusterKeysColumnIds.size()); - clusterKeysColumnIds.add(j); - break; - } - if (j == columns.size() - 1) { - throw new AnalysisException( - "Key cluster column[" + name + "] doesn't exist."); - } - } - } - - int minKeySize = 
keys.size() < clusterKeysColumnNames.size() ? keys.size() - : clusterKeysColumnNames.size(); - boolean sameKey = true; - for (int i = 0; i < minKeySize; ++i) { - if (!keys.get(i).equalsIgnoreCase(clusterKeysColumnNames.get(i))) { - sameKey = false; - break; - } - } - if (sameKey) { - throw new AnalysisException("Unique keys and cluster keys should be different."); - } - } - for (int i = 0; i < keys.size(); ++i) { String name = columns.get(i).getName(); if (!keys.get(i).equalsIgnoreCase(name)) { @@ -815,6 +767,47 @@ private void validateKeyColumns() { } } } + + if (!clusterKeysColumnNames.isEmpty()) { + // the same code as KeysDesc#analyzeClusterKeys + if (keysType != KeysType.UNIQUE_KEYS) { + throw new AnalysisException("Cluster keys only support unique keys table"); + } + // check that cluster keys is not duplicated + for (int i = 0; i < clusterKeysColumnNames.size(); i++) { + String name = clusterKeysColumnNames.get(i); + for (int j = 0; j < i; j++) { + if (clusterKeysColumnNames.get(j).equalsIgnoreCase(name)) { + throw new AnalysisException("Duplicate cluster key column[" + name + "]."); + } + } + } + // check that cluster keys is not equal to primary keys + int minKeySize = Math.min(keys.size(), clusterKeysColumnNames.size()); + boolean sameKey = true; + for (int i = 0; i < minKeySize; ++i) { + if (!keys.get(i).equalsIgnoreCase(clusterKeysColumnNames.get(i))) { + sameKey = false; + break; + } + } + if (sameKey) { + throw new AnalysisException("Unique keys and cluster keys should be different."); + } + // check that cluster key column exists + for (int i = 0; i < clusterKeysColumnNames.size(); ++i) { + String name = clusterKeysColumnNames.get(i); + for (int j = 0; j < columns.size(); j++) { + if (columns.get(j).getName().equalsIgnoreCase(name)) { + columns.get(j).setClusterKeyId(i); + break; + } + if (j == columns.size() - 1) { + throw new AnalysisException("Cluster key column[" + name + "] doesn't exist."); + } + } + } + } } /** @@ -858,7 +851,7 @@ public CreateTableStmt translateToLegacyStmt() { return new CreateTableStmt(ifNotExists, isExternal, new TableName(ctlName, dbName, tableName), catalogColumns, catalogIndexes, engineName, - new KeysDesc(keysType, keys, clusterKeysColumnNames, clusterKeysColumnIds), + new KeysDesc(keysType, keys, clusterKeysColumnNames), partitionDesc, distributionDesc, Maps.newHashMap(properties), extProperties, comment, addRollups, null); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateViewInfo.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateViewInfo.java index a881be046e7dd1..06843a1ee558e3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateViewInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/CreateViewInfo.java @@ -47,13 +47,15 @@ */ public class CreateViewInfo extends BaseViewInfo { private final boolean ifNotExists; + private final boolean orReplace; private final String comment; /** constructor*/ - public CreateViewInfo(boolean ifNotExists, TableNameInfo viewName, String comment, + public CreateViewInfo(boolean ifNotExists, boolean orReplace, TableNameInfo viewName, String comment, String querySql, List simpleColumnDefinitions) { super(viewName, querySql, simpleColumnDefinitions); this.ifNotExists = ifNotExists; + this.orReplace = orReplace; this.comment = comment; } @@ -94,8 +96,8 @@ public CreateViewStmt translateToLegacyStmt(ConnectContext ctx) { for (SimpleColumnDefinition 
def : simpleColumnDefinitions) { cols.add(def.translateToColWithComment()); } - CreateViewStmt createViewStmt = new CreateViewStmt(ifNotExists, viewName.transferToTableName(), cols, comment, - null); + CreateViewStmt createViewStmt = new CreateViewStmt(ifNotExists, orReplace, viewName.transferToTableName(), cols, + comment, null); // expand star(*) in project list and replace table name with qualifier String rewrittenSql = rewriteSql(ctx.getStatementContext().getIndexInSqlToString(), querySql); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/DefaultValue.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/DefaultValue.java index 0d9de31157d4c6..e3f388d272deeb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/DefaultValue.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/DefaultValue.java @@ -31,6 +31,7 @@ public class DefaultValue { public static String CURRENT_TIMESTAMP = "CURRENT_TIMESTAMP"; public static String NOW = "now"; public static String HLL_EMPTY = "HLL_EMPTY"; + public static String BITMAP_EMPTY = "BITMAP_EMPTY"; public static DefaultValue CURRENT_DATE_DEFAULT_VALUE = new DefaultValue(CURRENT_DATE, CURRENT_DATE.toLowerCase()); public static DefaultValue CURRENT_TIMESTAMP_DEFAULT_VALUE = new DefaultValue(CURRENT_TIMESTAMP, NOW); // default null @@ -40,7 +41,7 @@ public class DefaultValue { // default "value", "0" means empty hll public static DefaultValue HLL_EMPTY_DEFAULT_VALUE = new DefaultValue(ZERO, HLL_EMPTY); // default "value", "0" means empty bitmap - public static DefaultValue BITMAP_EMPTY_DEFAULT_VALUE = new DefaultValue(ZERO); + public static DefaultValue BITMAP_EMPTY_DEFAULT_VALUE = new DefaultValue(ZERO, BITMAP_EMPTY); // default "value", "[]" means empty array public static DefaultValue ARRAY_EMPTY_DEFAULT_VALUE = new DefaultValue("[]"); // Lets use the const value from math pacakge. 
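The ColumnDefinition and DefaultValue hunks above let an OLAP bitmap column declare an explicit BITMAP_EMPTY default (mirroring the existing HLL_EMPTY handling) and normalize whatever was written to the canonical empty-bitmap default. The following is a minimal, self-contained sketch of that normalization idea only; the nested record and helper names are illustrative stand-ins, not the real Doris classes.

```java
import java.util.Optional;

final class BitmapDefaultSketch {
    static final String ZERO = "0";
    static final String BITMAP_EMPTY = "BITMAP_EMPTY";

    // Simplified stand-in for DefaultValue: the stored value plus the expression name echoed back to users.
    record DefaultValue(String value, String exprName) {}

    static final DefaultValue NULL_DEFAULT = new DefaultValue(null, null);
    static final DefaultValue BITMAP_EMPTY_DEFAULT = new DefaultValue(ZERO, BITMAP_EMPTY);

    // For an OLAP bitmap column, only NULL or BITMAP_EMPTY ("0") is accepted as a default;
    // the column's stored default is then normalized to the empty bitmap.
    static Optional<DefaultValue> normalizeBitmapDefault(Optional<DefaultValue> given, boolean isOlap) {
        if (given.isPresent() && isOlap
                && given.get() != NULL_DEFAULT
                && !ZERO.equals(given.get().value())) {
            throw new IllegalArgumentException(
                    "Bitmap type column default value only support " + BITMAP_EMPTY);
        }
        return Optional.of(BITMAP_EMPTY_DEFAULT);
    }

    public static void main(String[] args) {
        System.out.println(normalizeBitmapDefault(Optional.empty(), true));                   // normalized default
        System.out.println(normalizeBitmapDefault(Optional.of(BITMAP_EMPTY_DEFAULT), true));  // accepted as-is
    }
}
```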
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/AbstractInsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/AbstractInsertExecutor.java index cdf74f5e9aca3a..0dc5922794ea8a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/AbstractInsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/AbstractInsertExecutor.java @@ -24,7 +24,7 @@ import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; import org.apache.doris.common.FeConstants; -import org.apache.doris.common.InternalErrorCode; +import org.apache.doris.common.Status; import org.apache.doris.common.UserException; import org.apache.doris.common.util.DebugUtil; import org.apache.doris.nereids.NereidsPlanner; @@ -38,6 +38,7 @@ import org.apache.doris.qe.StmtExecutor; import org.apache.doris.task.LoadEtlTask; import org.apache.doris.thrift.TQueryType; +import org.apache.doris.thrift.TStatusCode; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -49,7 +50,9 @@ * The derived class should implement the abstract method for certain type of target table */ public abstract class AbstractInsertExecutor { + protected static final long INVALID_TXN_ID = -1L; private static final Logger LOG = LogManager.getLogger(AbstractInsertExecutor.class); + protected long jobId; protected final ConnectContext ctx; protected final Coordinator coordinator; @@ -63,6 +66,7 @@ public abstract class AbstractInsertExecutor { protected String errMsg = ""; protected Optional insertCtx; protected final boolean emptyInsert; + protected long txnId = INVALID_TXN_ID; /** * Constructor @@ -94,7 +98,9 @@ public String getLabelName() { return labelName; } - public abstract long getTxnId(); + public long getTxnId() { + return txnId; + } /** * begin transaction if necessary @@ -109,7 +115,7 @@ public String getLabelName() { /** * Do something before exec */ - protected abstract void beforeExec(); + protected abstract void beforeExec() throws UserException; /** * Do something after exec finished @@ -142,7 +148,7 @@ protected final void execImpl(StmtExecutor executor, long jobId) throws Exceptio } boolean notTimeout = coordinator.join(execTimeout); if (!coordinator.isDone()) { - coordinator.cancel(); + coordinator.cancel(new Status(TStatusCode.CANCELLED, "insert timeout")); if (notTimeout) { errMsg = coordinator.getExecStatus().getErrorMsg(); ErrorReport.reportDdlException("there exists unhealthy backend. 
" @@ -193,25 +199,7 @@ public void executeSingleInsert(StmtExecutor executor, long jobId) throws Except executor.updateProfile(false); execImpl(executor, jobId); checkStrictModeAndFilterRatio(); - int retryTimes = 0; - while (true) { - try { - onComplete(); - break; - } catch (UserException e) { - LOG.warn("failed to commit txn, txnId={}, jobId={}, retryTimes={}", - getTxnId(), jobId, retryTimes, e); - if (e.getErrorCode() == InternalErrorCode.DELETE_BITMAP_LOCK_ERR) { - retryTimes++; - if (retryTimes >= Config.mow_insert_into_commit_retry_times) { - // should throw exception after running out of retry times - throw e; - } - } else { - throw e; - } - } - } + onComplete(); } catch (Throwable t) { onFail(t); // retry insert into from select when meet E-230 in cloud diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/BaseExternalTableInsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/BaseExternalTableInsertExecutor.java index e456d171df5986..082f1bab7d66f8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/BaseExternalTableInsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/BaseExternalTableInsertExecutor.java @@ -46,9 +46,7 @@ * Insert executor for base external table */ public abstract class BaseExternalTableInsertExecutor extends AbstractInsertExecutor { - protected static final long INVALID_TXN_ID = -1L; private static final Logger LOG = LogManager.getLogger(BaseExternalTableInsertExecutor.class); - protected long txnId = INVALID_TXN_ID; protected TransactionStatus txnStatus = TransactionStatus.ABORTED; protected final TransactionManager transactionManager; protected final String catalogName; @@ -70,16 +68,6 @@ public BaseExternalTableInsertExecutor(ConnectContext ctx, ExternalTable table, } } - @Override - public long getTxnId() { - return txnId; - } - - /** - * collect commit infos from BEs - */ - protected abstract void setCollectCommitInfoFunc(); - /** * At this time, FE has successfully collected all commit information from BEs. * Before commit this txn, commit information need to be analyzed and processed. 
@@ -94,7 +82,6 @@ public long getTxnId() { @Override public void beginTransaction() { txnId = transactionManager.begin(); - setCollectCommitInfoFunc(); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/HiveInsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/HiveInsertExecutor.java index 10ff27add86708..99464ccfc01a90 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/HiveInsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/HiveInsertExecutor.java @@ -49,13 +49,7 @@ public HiveInsertExecutor(ConnectContext ctx, HMSExternalTable table, } @Override - public void setCollectCommitInfoFunc() { - HMSTransaction transaction = (HMSTransaction) transactionManager.getTransaction(txnId); - coordinator.setHivePartitionUpdateFunc(transaction::updateHivePartitionUpdates); - } - - @Override - protected void beforeExec() { + protected void beforeExec() throws UserException { // check params HMSTransaction transaction = (HMSTransaction) transactionManager.getTransaction(txnId); Preconditions.checkArgument(insertCtx.isPresent(), "insert context must be present"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/IcebergInsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/IcebergInsertExecutor.java index 86b1f1ef0b7e2d..fe8ff063571e1f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/IcebergInsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/IcebergInsertExecutor.java @@ -47,13 +47,7 @@ public IcebergInsertExecutor(ConnectContext ctx, IcebergExternalTable table, } @Override - public void setCollectCommitInfoFunc() { - IcebergTransaction transaction = (IcebergTransaction) transactionManager.getTransaction(txnId); - coordinator.setIcebergCommitDataFunc(transaction::updateIcebergCommitData); - } - - @Override - protected void beforeExec() { + protected void beforeExec() throws UserException { String dbName = ((IcebergExternalTable) table).getDbName(); String tbName = table.getName(); SimpleTableInfo tableInfo = new SimpleTableInfo(dbName, tbName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertIntoTableCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertIntoTableCommand.java index 38d0d8386307cf..74f75d2d7d5dd9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertIntoTableCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertIntoTableCommand.java @@ -237,6 +237,7 @@ public AbstractInsertExecutor initPlan(ConnectContext ctx, StmtExecutor executor executor.setProfileType(ProfileType.LOAD); // We exposed @StmtExecutor#cancel as a unified entry point for statement interruption, // so we need to set this here + insertExecutor.getCoordinator().setTxnId(insertExecutor.getTxnId()); executor.setCoord(insertExecutor.getCoordinator()); return insertExecutor; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java index afcb5ee81d2958..064fccaf521029 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java @@ -28,6 +28,7 @@ import org.apache.doris.common.util.InternalDatabaseUtil; import org.apache.doris.datasource.hive.HMSExternalTable; import org.apache.doris.datasource.iceberg.IcebergExternalTable; +import org.apache.doris.insertoverwrite.InsertOverwriteManager; import org.apache.doris.insertoverwrite.InsertOverwriteUtil; import org.apache.doris.mtmv.MTMVUtil; import org.apache.doris.mysql.privilege.PrivPredicate; @@ -60,11 +61,14 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.awaitility.Awaitility; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; /** * insert into select command implementation @@ -81,6 +85,8 @@ public class InsertOverwriteTableCommand extends Command implements ForwardWithS private LogicalPlan logicalQuery; private Optional labelName; private final Optional cte; + private AtomicBoolean isCancelled = new AtomicBoolean(false); + private AtomicBoolean isRunning = new AtomicBoolean(false); /** * constructor @@ -157,35 +163,88 @@ public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { // Do not create temp partition on FE partitionNames = new ArrayList<>(); } + InsertOverwriteManager insertOverwriteManager = Env.getCurrentEnv().getInsertOverwriteManager(); + insertOverwriteManager.recordRunningTableOrException(targetTable.getDatabase(), targetTable); + isRunning.set(true); long taskId = 0; try { if (isAutoDetectOverwrite()) { // taskId here is a group id. it contains all replace tasks made and registered in rpc process. - taskId = Env.getCurrentEnv().getInsertOverwriteManager().registerTaskGroup(); + taskId = insertOverwriteManager.registerTaskGroup(); // When inserting, BE will call to replace partition by FrontendService. FE will register new temp // partitions and return. for transactional, the replacement will really occur when insert successed, // i.e. `insertInto` finished. then we call taskGroupSuccess to make replacement. 
insertInto(ctx, executor, taskId); - Env.getCurrentEnv().getInsertOverwriteManager().taskGroupSuccess(taskId, (OlapTable) targetTable); + insertOverwriteManager.taskGroupSuccess(taskId, (OlapTable) targetTable); } else { List tempPartitionNames = InsertOverwriteUtil.generateTempPartitionNames(partitionNames); - taskId = Env.getCurrentEnv().getInsertOverwriteManager() + if (isCancelled.get()) { + LOG.info("insert overwrite is cancelled before registerTask, queryId: {}", + ctx.getQueryIdentifier()); + return; + } + taskId = insertOverwriteManager .registerTask(targetTable.getDatabase().getId(), targetTable.getId(), tempPartitionNames); + if (isCancelled.get()) { + LOG.info("insert overwrite is cancelled before addTempPartitions, queryId: {}", + ctx.getQueryIdentifier()); + // not need deal temp partition + insertOverwriteManager.taskSuccess(taskId); + return; + } InsertOverwriteUtil.addTempPartitions(targetTable, partitionNames, tempPartitionNames); + if (isCancelled.get()) { + LOG.info("insert overwrite is cancelled before insertInto, queryId: {}", ctx.getQueryIdentifier()); + insertOverwriteManager.taskFail(taskId); + return; + } insertInto(ctx, executor, tempPartitionNames); + if (isCancelled.get()) { + LOG.info("insert overwrite is cancelled before replacePartition, queryId: {}", + ctx.getQueryIdentifier()); + insertOverwriteManager.taskFail(taskId); + return; + } InsertOverwriteUtil.replacePartition(targetTable, partitionNames, tempPartitionNames); - Env.getCurrentEnv().getInsertOverwriteManager().taskSuccess(taskId); + if (isCancelled.get()) { + LOG.info("insert overwrite is cancelled before taskSuccess, do nothing, queryId: {}", + ctx.getQueryIdentifier()); + } + insertOverwriteManager.taskSuccess(taskId); } } catch (Exception e) { LOG.warn("insert into overwrite failed with task(or group) id " + taskId); if (isAutoDetectOverwrite()) { - Env.getCurrentEnv().getInsertOverwriteManager().taskGroupFail(taskId); + insertOverwriteManager.taskGroupFail(taskId); } else { - Env.getCurrentEnv().getInsertOverwriteManager().taskFail(taskId); + insertOverwriteManager.taskFail(taskId); } throw e; } finally { ConnectContext.get().setSkipAuth(false); + insertOverwriteManager + .dropRunningRecord(targetTable.getDatabase().getId(), targetTable.getId()); + isRunning.set(false); + } + } + + /** + * cancel insert overwrite + */ + public void cancel() { + this.isCancelled.set(true); + } + + /** + * wait insert overwrite not running + */ + public void waitNotRunning() { + long waitMaxTimeSecond = 10L; + try { + Awaitility.await().atMost(waitMaxTimeSecond, TimeUnit.SECONDS).untilFalse(isRunning); + } catch (Exception e) { + LOG.warn("waiting time exceeds {} second, stop wait, labelName: {}", waitMaxTimeSecond, + labelName.isPresent() ? 
labelName.get() : "", e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java index 49e7858f6faf65..e0b167b93b6357 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertUtils.java @@ -289,6 +289,12 @@ public static Plan normalizePlan(Plan plan, TableIf table, Optional) unboundLogicalSink).setPartialUpdate(false); } else { + boolean hasSyncMaterializedView = olapTable.getFullSchema().stream() + .anyMatch(col -> col.isMaterializedViewColumn()); + if (hasSyncMaterializedView) { + throw new AnalysisException("Can't do partial update on merge-on-write Unique table" + + " with sync materialized view."); + } boolean hasMissingColExceptAutoIncKey = false; for (Column col : olapTable.getFullSchema()) { Optional insertCol = unboundLogicalSink.getColNames().stream() diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/JdbcInsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/JdbcInsertExecutor.java index 928b17edf38933..fb41f71083a753 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/JdbcInsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/JdbcInsertExecutor.java @@ -90,11 +90,6 @@ protected void finalizeSink(PlanFragment fragment, DataSink sink, PhysicalSink p // do nothing } - @Override - protected void setCollectCommitInfoFunc() { - // do nothing - } - @Override protected void doBeforeCommit() throws UserException { // do nothing diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/OlapInsertExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/OlapInsertExecutor.java index b57ac3834958d6..658b154b017167 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/OlapInsertExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/OlapInsertExecutor.java @@ -72,9 +72,7 @@ * Insert executor for olap table */ public class OlapInsertExecutor extends AbstractInsertExecutor { - protected static final long INVALID_TXN_ID = -1L; private static final Logger LOG = LogManager.getLogger(OlapInsertExecutor.class); - protected long txnId = INVALID_TXN_ID; protected TransactionStatus txnStatus = TransactionStatus.ABORTED; /** @@ -85,11 +83,6 @@ public OlapInsertExecutor(ConnectContext ctx, Table table, super(ctx, table, labelName, planner, insertCtx, emptyInsert); } - @Override - public long getTxnId() { - return txnId; - } - @Override public void beginTransaction() { if (isGroupCommitHttpStream()) { @@ -173,6 +166,7 @@ public void finalizeSink(PlanFragment fragment, DataSink sink, PhysicalSink phys .createLocation(database.getId(), olapTableSink.getDstTable()); dataStreamSink.setTabletSinkLocationParam(locationParams.get(0)); dataStreamSink.setTabletSinkTxnId(olapTableSink.getTxnId()); + dataStreamSink.setTabletSinkExprs(fragment.getOutputExprs()); } } catch (Exception e) { throw new AnalysisException(e.getMessage(), e); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/distribute/worker/job/UnassignedJobBuilder.java 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/distribute/worker/job/UnassignedJobBuilder.java index f7357f1851994b..396ba51e01b4ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/distribute/worker/job/UnassignedJobBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/distribute/worker/job/UnassignedJobBuilder.java @@ -30,8 +30,6 @@ import org.apache.doris.planner.PlanNodeId; import org.apache.doris.planner.ScanNode; import org.apache.doris.planner.SchemaScanNode; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.SessionVariable; import org.apache.doris.thrift.TExplainLevel; import com.google.common.collect.ArrayListMultimap; @@ -255,12 +253,6 @@ private static boolean shouldAssignByBucket(PlanFragment fragment) { } private static boolean enableBucketShuffleJoin() { - if (ConnectContext.get() != null) { - SessionVariable sessionVariable = ConnectContext.get().getSessionVariable(); - if (!sessionVariable.isEnableBucketShuffleJoin() && !sessionVariable.isEnableNereidsPlanner()) { - return false; - } - } return true; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalOlapScan.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalOlapScan.java index ef3d4c43d8266f..15e06816c1a95b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalOlapScan.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalOlapScan.java @@ -538,9 +538,6 @@ public void computeUniform(DataTrait.Builder builder) { @Override public void computeEqualSet(DataTrait.Builder builder) { - if (getTable() instanceof MTMV && getTable().getName().equals("mv1")) { - System.out.println(); - } if (getTable() instanceof MTMV) { MTMV mtmv = (MTMV) getTable(); MTMVCache cache = mtmv.getCache(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalProject.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalProject.java index 1484030e9c25a9..9174e4a40fb2bf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalProject.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalProject.java @@ -27,6 +27,7 @@ import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; import org.apache.doris.nereids.trees.expressions.Slot; +import org.apache.doris.nereids.trees.expressions.functions.NoneMovableFunction; import org.apache.doris.nereids.trees.expressions.functions.scalar.Uuid; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.PlanType; @@ -43,6 +44,7 @@ import com.google.common.collect.ImmutableSet; import org.json.JSONObject; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -192,7 +194,15 @@ public List getOutputs() { @Override public Plan pruneOutputs(List prunedOutputs) { - return withProjects(prunedOutputs); + List allProjects = new ArrayList<>(prunedOutputs); + for (NamedExpression expression : projects) { + if (expression.containsType(NoneMovableFunction.class)) { + if (!prunedOutputs.contains(expression)) { + allProjects.add(expression); + } + } + } + return withProjects(allProjects); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalEmptyRelation.java 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalEmptyRelation.java index e01c3ead327b65..30beb0d2f41aae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalEmptyRelation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalEmptyRelation.java @@ -116,11 +116,9 @@ public PhysicalPlan withPhysicalPropertiesAndStats(PhysicalProperties physicalPr @Override public Optional computeResultInFe(CascadesContext cascadesContext, - Optional sqlCacheContext) { + Optional sqlCacheContext, List outputSlots) { List columns = Lists.newArrayList(); - List outputSlots = getOutput(); - for (int i = 0; i < outputSlots.size(); i++) { - NamedExpression output = outputSlots.get(i); + for (NamedExpression output : outputSlots) { columns.add(new Column(output.getName(), output.getDataType().toCatalogDataType())); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalOneRowRelation.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalOneRowRelation.java index cd068316b8ccf8..e3c1ca7f493525 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalOneRowRelation.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalOneRowRelation.java @@ -28,6 +28,7 @@ import org.apache.doris.nereids.properties.PhysicalProperties; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; +import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.literal.Literal; import org.apache.doris.nereids.trees.plans.ComputeResultSet; import org.apache.doris.nereids.trees.plans.Plan; @@ -136,19 +137,24 @@ public PhysicalOneRowRelation withPhysicalPropertiesAndStats(PhysicalProperties @Override public Optional computeResultInFe( - CascadesContext cascadesContext, Optional sqlCacheContext) { + CascadesContext cascadesContext, Optional sqlCacheContext, List outputSlots) { List columns = Lists.newArrayList(); List data = Lists.newArrayList(); - for (int i = 0; i < projects.size(); i++) { - NamedExpression item = projects.get(i); - NamedExpression output = getOutput().get(i); - Expression expr = item.child(0); - if (expr instanceof Literal) { - LiteralExpr legacyExpr = ((Literal) expr).toLegacyLiteral(); - columns.add(new Column(output.getName(), output.getDataType().toCatalogDataType())); - data.add(legacyExpr.getStringValueInFe(cascadesContext.getStatementContext().getFormatOptions())); - } else { - return Optional.empty(); + for (Slot outputSlot : outputSlots) { + for (int i = 0; i < projects.size(); i++) { + NamedExpression item = projects.get(i); + NamedExpression output = getOutput().get(i); + if (!outputSlot.getExprId().equals(output.getExprId())) { + continue; + } + Expression expr = item.child(0); + if (expr instanceof Literal) { + LiteralExpr legacyExpr = ((Literal) expr).toLegacyLiteral(); + columns.add(new Column(output.getName(), output.getDataType().toCatalogDataType())); + data.add(legacyExpr.getStringValueInFe(cascadesContext.getStatementContext().getFormatOptions())); + } else { + return Optional.empty(); + } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalResultSink.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalResultSink.java index 8fb6dfb286e2de..46df134c0cd4c1 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalResultSink.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalResultSink.java @@ -24,6 +24,7 @@ import org.apache.doris.nereids.properties.PhysicalProperties; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; +import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.plans.ComputeResultSet; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.PlanType; @@ -129,10 +130,10 @@ public PhysicalResultSink resetLogicalProperties() { @Override public Optional computeResultInFe( - CascadesContext cascadesContext, Optional sqlCacheContext) { + CascadesContext cascadesContext, Optional sqlCacheContext, List outputSlots) { CHILD_TYPE child = child(); if (child instanceof ComputeResultSet) { - return ((ComputeResultSet) child).computeResultInFe(cascadesContext, sqlCacheContext); + return ((ComputeResultSet) child).computeResultInFe(cascadesContext, sqlCacheContext, outputSlots); } else { return Optional.empty(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java index 549b70a296de69..f0fde282011ddd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java @@ -167,7 +167,7 @@ public String getChildrenTreeString() { @Override public Optional computeResultInFe( - CascadesContext cascadesContext, Optional sqlCacheContext) { + CascadesContext cascadesContext, Optional sqlCacheContext, List outputSlots) { return resultSet; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 0e7dba0bba7659..dc6f43afee267e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -1576,7 +1576,9 @@ public void logPartitionRename(TableInfo tableInfo) { } public void logColumnRename(TableRenameColumnInfo info) { - logEdit(OperationType.OP_RENAME_COLUMN, info); + long logId = logEdit(OperationType.OP_RENAME_COLUMN, info); + LOG.info("log column rename, logId : {}, infos: {}", logId, info); + Env.getCurrentEnv().getBinlogManager().addColumnRename(info, logId); } public void logAddBroker(BrokerMgr.ModifyBrokerInfo info) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/TableRenameColumnInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/TableRenameColumnInfo.java index eafdb943e1164b..1a9e21b776f659 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/TableRenameColumnInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/TableRenameColumnInfo.java @@ -81,6 +81,10 @@ public static TableRenameColumnInfo read(DataInput in) throws IOException { return GsonUtils.GSON.fromJson(Text.readString(in), TableRenameColumnInfo.class); } + public String toJson() { + return GsonUtils.GSON.toJson(this); + } + @Override public boolean equals(Object obj) { if (obj == this) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java b/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java index 
b9cf516bc3d2cc..ef42190fa25004 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/DataStreamSink.java @@ -62,6 +62,7 @@ public class DataStreamSink extends DataSink { protected TOlapTableLocationParam tabletSinkLocationParam = null; protected TupleDescriptor tabletSinkTupleDesc = null; protected long tabletSinkTxnId = -1; + protected List tabletSinkExprs = null; public DataStreamSink() { @@ -145,6 +146,10 @@ public void setTabletSinkLocationParam(TOlapTableLocationParam locationParam) { this.tabletSinkLocationParam = locationParam; } + public void setTabletSinkExprs(List tabletSinkExprs) { + this.tabletSinkExprs = tabletSinkExprs; + } + public void setTabletSinkTxnId(long txnId) { this.tabletSinkTxnId = txnId; } @@ -224,6 +229,11 @@ protected TDataSink toThrift() { if (tabletSinkLocationParam != null) { tStreamSink.setTabletSinkLocation(tabletSinkLocationParam); } + if (tabletSinkExprs != null) { + for (Expr expr : tabletSinkExprs) { + tStreamSink.addToTabletSinkExprs(expr.treeToThrift()); + } + } tStreamSink.setTabletSinkTxnId(tabletSinkTxnId); result.setStreamSink(tStreamSink); return result; diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java index bb41761cfd550e..ae1d34308a38ed 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java @@ -484,11 +484,7 @@ public boolean isTransferQueryStatisticsWithEveryBatch() { } public int getFragmentSequenceNum() { - if (ConnectContext.get().getSessionVariable().isEnableNereidsPlanner()) { - return fragmentSequenceNum; - } else { - return fragmentId.asInt(); - } + return fragmentSequenceNum; } public void setFragmentSequenceNum(int seq) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java index 8988d3220c8672..acef987a0a3985 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/StreamLoadPlanner.java @@ -154,6 +154,15 @@ public TPipelineFragmentParams plan(TUniqueId loadId, int fragmentInstanceIdInde isPartialUpdate = false; } + if (isPartialUpdate) { + boolean hasSyncMaterializedView = destTable.getFullSchema().stream() + .anyMatch(col -> col.isMaterializedViewColumn()); + if (hasSyncMaterializedView) { + throw new DdlException("Can't do partial update on merge-on-write Unique table" + + " with sync materialized view."); + } + } + HashSet partialUpdateInputColumns = new HashSet<>(); if (isPartialUpdate) { for (Column col : destTable.getFullSchema()) { @@ -195,7 +204,8 @@ public TPipelineFragmentParams plan(TUniqueId loadId, int fragmentInstanceIdInde + " by generated columns, missing: " + col.getName()); } } - if (taskInfo.getMergeType() == LoadTask.MergeType.DELETE) { + if (taskInfo.getMergeType() == LoadTask.MergeType.DELETE + || taskInfo.getMergeType() == LoadTask.MergeType.MERGE) { partialUpdateInputColumns.add(Column.DELETE_SIGN); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/plsql/executor/PlsqlQueryExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/plsql/executor/PlsqlQueryExecutor.java index f9c7ca767fc13a..37a8cf310a1d95 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/plsql/executor/PlsqlQueryExecutor.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/plsql/executor/PlsqlQueryExecutor.java @@ -20,7 +20,6 @@ import org.apache.doris.catalog.MysqlColType; import org.apache.doris.catalog.PrimitiveType; import org.apache.doris.catalog.Type; -import org.apache.doris.mysql.MysqlCommand; import org.apache.doris.plsql.exception.QueryException; import org.apache.doris.qe.AutoCloseConnectContext; import org.apache.doris.qe.ConnectContext; @@ -47,7 +46,7 @@ public QueryResult executeQuery(String sql, ParserRuleContext ctx) { autoCloseCtx.call(); context.setRunProcedure(true); ConnectProcessor processor = new MysqlConnectProcessor(context); - processor.executeQuery(MysqlCommand.COM_QUERY, sql); + processor.executeQuery(sql); StmtExecutor executor = context.getExecutor(); if (executor.getParsedStmt().getResultExprs() != null) { return new QueryResult(new DorisRowResult(executor.getCoord(), executor.getColumns(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/plugin/PropertiesUtils.java b/fe/fe-core/src/main/java/org/apache/doris/plugin/PropertiesUtils.java new file mode 100644 index 00000000000000..953a35787b7547 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/plugin/PropertiesUtils.java @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.plugin; + +import org.apache.doris.common.Config; +import org.apache.doris.common.EnvUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +public class PropertiesUtils { + public static final String ACCESS_PROPERTIES_FILE_DIR = Config.authorization_config_file_path; + + public static Map loadAccessControllerPropertiesOrNull() throws IOException { + String configFilePath = EnvUtils.getDorisHome() + ACCESS_PROPERTIES_FILE_DIR; + if (new File(configFilePath).exists()) { + Properties properties = new Properties(); + properties.load(Files.newInputStream(Paths.get(configFilePath))); + return propertiesToMap(properties); + } + return null; + } + + public static Map propertiesToMap(Properties properties) { + Map map = new HashMap<>(); + for (Map.Entry entry : properties.entrySet()) { + String key = String.valueOf(entry.getKey()); + String value = String.valueOf(entry.getValue()); + map.put(key, value); + } + return map; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/AutoCloseConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/AutoCloseConnectContext.java index ffebe97e706548..0c400950c58052 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/AutoCloseConnectContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/AutoCloseConnectContext.java @@ -36,6 +36,7 @@ public void call() { @Override public void close() { + connectContext.clear(); ConnectContext.remove(); if (previousContext != null) { previousContext.setThreadLocalInfo(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java index fa81825d370bc0..dd00944c64f6c5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java @@ -122,6 +122,7 @@ public enum ConnectType { protected volatile TUniqueId queryId = null; protected volatile AtomicInteger instanceIdGenerator = new AtomicInteger(); protected volatile String traceId; + protected volatile TUniqueId lastQueryId = null; // id for this connection protected volatile int connectionId; // Timestamp when the connection is make @@ -832,6 +833,11 @@ public StmtExecutor getExecutor() { return executor; } + public void clear() { + executor = null; + statementContext = null; + } + public PlSqlOperation getPlSqlOperation() { if (plSqlOperation == null) { plSqlOperation = new PlSqlOperation(); @@ -861,6 +867,9 @@ public void setKilled() { } public void setQueryId(TUniqueId queryId) { + if (this.queryId != null) { + this.lastQueryId = this.queryId.deepCopy(); + } this.queryId = queryId; if (connectScheduler != null && !Strings.isNullOrEmpty(traceId)) { connectScheduler.putTraceId2QueryId(traceId, queryId); @@ -879,6 +888,10 @@ public TUniqueId queryId() { return queryId; } + public TUniqueId getLastQueryId() { + return lastQueryId; + } + public TUniqueId nextInstanceId() { return new TUniqueId(queryId.hi, queryId.lo + instanceIdGenerator.incrementAndGet()); } @@ -925,7 +938,7 @@ public void kill(boolean killConnection) { closeChannel(); } // Now, cancel running query. - cancelQuery(); + cancelQuery(new Status(TStatusCode.CANCELLED, "cancel query by user")); } // kill operation with no protect by timeout. 
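Editor's note (illustration only, not part of the patch): the ConnectContext changes above add a lastQueryId snapshot, a clear() hook that AutoCloseConnectContext.close() now invokes, and a cancelQuery overload that carries an explicit cancel reason. The sketch below assumes a live ConnectContext instance and only demonstrates the intended lastQueryId behaviour; the helper class and method names are hypothetical.

    import org.apache.doris.qe.ConnectContext;
    import org.apache.doris.thrift.TUniqueId;

    final class LastQueryIdSketch {
        // Hypothetical helper: setQueryId() now deep-copies the previous id before
        // installing a new one, so the prior query stays identifiable afterwards.
        static void demo(ConnectContext ctx) {
            ctx.setQueryId(new TUniqueId(1L, 1L));
            ctx.setQueryId(new TUniqueId(2L, 2L));
            assert ctx.queryId().getHi() == 2L;        // current query id
            assert ctx.getLastQueryId().getHi() == 1L; // snapshot of the previous query id
        }
    }
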
@@ -947,10 +960,10 @@ private void killByTimeout(boolean killConnection) { } } - public void cancelQuery() { + public void cancelQuery(Status cancelReason) { StmtExecutor executorRef = executor; if (executorRef != null) { - executorRef.cancel(); + executorRef.cancel(cancelReason); } } @@ -1157,10 +1170,10 @@ public static String cloudNoBackendsReason() { StringBuilder sb = new StringBuilder(); if (ConnectContext.get() != null) { String clusterName = ConnectContext.get().getCloudCluster(); - String hits = "or you may not have permission to access the current cluster = "; + String hits = "or you may not have permission to access the current compute group = "; sb.append(" "); if (Strings.isNullOrEmpty(clusterName)) { - return sb.append(hits).append("cluster name empty").toString(); + return sb.append(hits).append("compute group name empty").toString(); } String clusterStatus = ((CloudSystemInfoService) Env.getCurrentSystemInfo()) .getCloudStatusByName(clusterName); @@ -1193,12 +1206,12 @@ public CloudClusterResult getCloudClusterByPolicy() { // valid r = new CloudClusterResult(defaultCloudCluster, CloudClusterResult.Comment.FOUND_BY_DEFAULT_CLUSTER); - LOG.info("use default cluster {}", defaultCloudCluster); + LOG.info("use default compute group {}", defaultCloudCluster); } else { // invalid r = new CloudClusterResult(defaultCloudCluster, CloudClusterResult.Comment.DEFAULT_CLUSTER_SET_BUT_NOT_EXIST); - LOG.warn("default cluster {} current invalid, please change it", r); + LOG.warn("default compute group {} current invalid, please change it", r); } return r; } @@ -1214,7 +1227,7 @@ public CloudClusterResult getCloudClusterByPolicy() { .getBackendsByClusterName(cloudClusterName); AtomicBoolean hasAliveBe = new AtomicBoolean(false); bes.stream().filter(Backend::isAlive).findAny().ifPresent(backend -> { - LOG.debug("get a clusterName {}, it's has more than one alive be {}", cloudCluster, backend); + LOG.debug("get a compute group {}, it's has more than one alive be {}", cloudCluster, backend); hasAliveBe.set(true); }); if (hasAliveBe.get()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java index 8289b27f0690c1..6308dfd23588f1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java @@ -48,7 +48,6 @@ import org.apache.doris.datasource.CatalogIf; import org.apache.doris.metric.MetricRepo; import org.apache.doris.mysql.MysqlChannel; -import org.apache.doris.mysql.MysqlCommand; import org.apache.doris.mysql.MysqlPacket; import org.apache.doris.mysql.MysqlSerializer; import org.apache.doris.mysql.MysqlServerStatusFlag; @@ -56,7 +55,6 @@ import org.apache.doris.nereids.SqlCacheContext.CacheKeyType; import org.apache.doris.nereids.StatementContext; import org.apache.doris.nereids.exceptions.NotSupportedException; -import org.apache.doris.nereids.exceptions.ParseException; import org.apache.doris.nereids.glue.LogicalPlanAdapter; import org.apache.doris.nereids.minidump.MinidumpUtils; import org.apache.doris.nereids.parser.Dialect; @@ -218,7 +216,7 @@ protected void auditAfterExec(String origStmt, StatementBase parsedStmt, } // only throw an exception when there is a problem interacting with the requesting client - protected void handleQuery(MysqlCommand mysqlCommand, String originStmt) throws ConnectionException { + protected void handleQuery(String originStmt) throws ConnectionException { if 
(Config.isCloudMode()) { if (!ctx.getCurrentUserIdentity().isRootUser() && ((CloudSystemInfoService) Env.getCurrentSystemInfo()).getInstanceStatus() @@ -233,7 +231,7 @@ protected void handleQuery(MysqlCommand mysqlCommand, String originStmt) throws } } try { - executeQuery(mysqlCommand, originStmt); + executeQuery(originStmt); } catch (ConnectionException exception) { throw exception; } catch (Exception ignored) { @@ -241,7 +239,7 @@ protected void handleQuery(MysqlCommand mysqlCommand, String originStmt) throws } } - public void executeQuery(MysqlCommand mysqlCommand, String originStmt) throws Exception { + public void executeQuery(String originStmt) throws Exception { if (MetricRepo.isInit) { MetricRepo.COUNTER_REQUEST_ALL.increase(1L); MetricRepo.increaseClusterRequestAll(ctx.getCloudCluster(false)); @@ -252,89 +250,37 @@ public void executeQuery(MysqlCommand mysqlCommand, String originStmt) throws Ex ctx.setSqlHash(sqlHash); SessionVariable sessionVariable = ctx.getSessionVariable(); - boolean wantToParseSqlFromSqlCache = sessionVariable.isEnableNereidsPlanner() - && CacheAnalyzer.canUseSqlCache(sessionVariable); + boolean wantToParseSqlFromSqlCache = CacheAnalyzer.canUseSqlCache(sessionVariable); List stmts = null; - Exception nereidsParseException = null; - Exception nereidsSyntaxException = null; long parseSqlStartTime = System.currentTimeMillis(); List cachedStmts = null; CacheKeyType cacheKeyType = null; - if (sessionVariable.isEnableNereidsPlanner()) { - if (wantToParseSqlFromSqlCache) { - cachedStmts = parseFromSqlCache(originStmt); - Optional sqlCacheContext = ConnectContext.get() - .getStatementContext().getSqlCacheContext(); - if (sqlCacheContext.isPresent()) { - cacheKeyType = sqlCacheContext.get().getCacheKeyType(); - } - if (cachedStmts != null) { - stmts = cachedStmts; - } - } - - if (cachedStmts == null) { - try { - stmts = new NereidsParser().parseSQL(convertedStmt, sessionVariable); - } catch (NotSupportedException e) { - // Parse sql failed, audit it and return - handleQueryException(e, convertedStmt, null, null); - return; - } catch (ParseException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Nereids parse sql failed. Reason: {}. Statement: \"{}\".", - e.getMessage(), convertedStmt); - } - // ATTN: Do not set nereidsParseException in this case. - // Because ParseException means the sql is not supported by Nereids. - // It should be parsed by old parser, so not setting nereidsParseException to avoid - // suppressing the exception thrown by old parser. - nereidsParseException = e; - } catch (Exception e) { - // TODO: We should catch all exception here until we support all query syntax. - if (LOG.isDebugEnabled()) { - LOG.debug("Nereids parse sql failed with other exception. Reason: {}. Statement: \"{}\".", - e.getMessage(), convertedStmt); - } - nereidsSyntaxException = e; - } + if (wantToParseSqlFromSqlCache) { + cachedStmts = parseFromSqlCache(originStmt); + Optional sqlCacheContext = ConnectContext.get() + .getStatementContext().getSqlCacheContext(); + if (sqlCacheContext.isPresent()) { + cacheKeyType = sqlCacheContext.get().getCacheKeyType(); } - - if (stmts == null) { - String errMsg; - Throwable exception = null; - if (nereidsParseException != null) { - errMsg = nereidsParseException.getMessage(); - exception = nereidsParseException; - } else if (nereidsSyntaxException != null) { - errMsg = nereidsSyntaxException.getMessage(); - exception = nereidsSyntaxException; - } else { - errMsg = "Nereids parse statements failed. 
" + originStmt; - } - if (exception == null) { - exception = new AnalysisException(errMsg); - } else { - exception = new AnalysisException(errMsg, exception); - } - handleQueryException(exception, originStmt, null, null); - return; + if (cachedStmts != null) { + stmts = cachedStmts; } } - // stmts == null when Nereids cannot planner this query or Nereids is disabled. if (stmts == null) { - if (mysqlCommand == MysqlCommand.COM_STMT_PREPARE) { - // avoid fall back to legacy planner - ctx.getState().setError(ErrorCode.ERR_UNSUPPORTED_PS, "Not supported such prepared statement"); - ctx.getState().setErrType(QueryState.ErrType.OTHER_ERR); - return; - } try { - stmts = parse(convertedStmt); - } catch (Throwable throwable) { + stmts = new NereidsParser().parseSQL(convertedStmt, sessionVariable); + } catch (NotSupportedException e) { // Parse sql failed, audit it and return - handleQueryException(throwable, convertedStmt, null, null); + handleQueryException(e, convertedStmt, null, null); + return; + } catch (Exception e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Nereids parse sql failed. Reason: {}. Statement: \"{}\".", + e.getMessage(), convertedStmt, e); + } + Throwable exception = new AnalysisException(e.getMessage(), e); + handleQueryException(exception, originStmt, null, null); return; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java index cafe9edd3a18f0..43fa4dddca7844 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectScheduler.java @@ -19,6 +19,7 @@ import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.Env; +import org.apache.doris.common.Status; import org.apache.doris.common.ThreadPoolManager; import org.apache.doris.common.util.DebugUtil; import org.apache.doris.mysql.privilege.PrivPredicate; @@ -145,11 +146,11 @@ public ConnectContext getContext(String flightToken) { return null; } - public void cancelQuery(String queryId) { + public void cancelQuery(String queryId, Status cancelReason) { for (ConnectContext ctx : connectionMap.values()) { TUniqueId qid = ctx.queryId(); if (qid != null && DebugUtil.printId(qid).equals(queryId)) { - ctx.cancelQuery(); + ctx.cancelQuery(cancelReason); break; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java index 5e3a59d9a54d96..4753436196ddf2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java @@ -36,6 +36,8 @@ import org.apache.doris.common.util.TimeUtils; import org.apache.doris.datasource.ExternalScanNode; import org.apache.doris.datasource.FileQueryScanNode; +import org.apache.doris.datasource.hive.HMSTransaction; +import org.apache.doris.datasource.iceberg.IcebergTransaction; import org.apache.doris.load.loadv2.LoadJob; import org.apache.doris.metric.MetricRepo; import org.apache.doris.mysql.MysqlCommand; @@ -94,8 +96,6 @@ import org.apache.doris.thrift.TFileScanRange; import org.apache.doris.thrift.TFileScanRangeParams; import org.apache.doris.thrift.TFragmentInstanceReport; -import org.apache.doris.thrift.THivePartitionUpdate; -import org.apache.doris.thrift.TIcebergCommitData; import org.apache.doris.thrift.TNetworkAddress; import org.apache.doris.thrift.TPaloScanRange; import org.apache.doris.thrift.TPipelineFragmentParams; @@ -251,12 +251,6 @@ 
public class Coordinator implements CoordInterface { private final List commitInfos = Lists.newArrayList(); private final List errorTabletInfos = Lists.newArrayList(); - // Collect all hivePartitionUpdates obtained from be - Consumer> hivePartitionUpdateFunc; - - // Collect all icebergCommitData obtained from be - Consumer> icebergCommitDataFunc; - // Input parameter private long jobId = -1; // job which this task belongs to private TUniqueId queryId; @@ -426,15 +420,12 @@ private void setFromUserProperty(ConnectContext connectContext) { private void initQueryOptions(ConnectContext context) { this.queryOptions = context.getSessionVariable().toThrift(); - this.queryOptions.setBeExecVersion(Config.be_exec_version); this.queryOptions.setQueryTimeout(context.getExecTimeout()); this.queryOptions.setExecutionTimeout(context.getExecTimeout()); if (this.queryOptions.getExecutionTimeout() < 1) { LOG.info("try set timeout less than 1", new RuntimeException("")); } - this.queryOptions.setEnableScanNodeRunSerial(context.getSessionVariable().isEnableScanRunSerial()); this.queryOptions.setFeProcessUuid(ExecuteEnv.getInstance().getProcessUUID()); - this.queryOptions.setWaitFullBlockScheduleTimes(context.getSessionVariable().getWaitFullBlockScheduleTimes()); this.queryOptions.setMysqlRowBinaryFormat( context.getCommand() == MysqlCommand.COM_STMT_EXECUTE); } @@ -487,6 +478,10 @@ public long getTxnId() { return txnId; } + public void setTxnId(long txnId) { + this.txnId = txnId; + } + public String getLabel() { return label; } @@ -1283,18 +1278,11 @@ public Status shouldCancel(List currentBackends) { } } - // Cancel execution of query. This includes the execution of the local plan - // fragment, - // if any, as well as all plan fragments on remote nodes. - public void cancel() { - cancel(new Status(TStatusCode.CANCELLED, "query is cancelled by user")); + @Override + public void cancel(Status cancelReason) { if (queueToken != null) { queueToken.cancel(); } - } - - @Override - public void cancel(Status cancelReason) { for (ScanNode scanNode : scanNodes) { scanNode.stop(); } @@ -2384,14 +2372,6 @@ private void updateScanRangeNumByScanRange(TScanRangeParams param) { // TODO: more ranges? 
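Editor's note (illustration only, not part of the patch): with the refactor above, ConnectContext.cancelQuery, ConnectScheduler.cancelQuery, StmtExecutor.cancel and Coordinator.cancel all take a Status, so the cancel reason is supplied by the caller instead of being synthesized at each layer. A hypothetical call site could look like the following sketch; the class name, method name and message text are illustrative.

    import org.apache.doris.common.Status;
    import org.apache.doris.qe.ConnectContext;
    import org.apache.doris.thrift.TStatusCode;

    final class CancelWithReasonSketch {
        // The reason travels ConnectContext.cancelQuery -> StmtExecutor.cancel -> Coordinator.cancel,
        // so logs and the query state can record why the query was cancelled.
        static void cancel(ConnectContext ctx, String why) {
            ctx.cancelQuery(new Status(TStatusCode.CANCELLED, why));
        }
    }
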
} - public void setHivePartitionUpdateFunc(Consumer> hivePartitionUpdateFunc) { - this.hivePartitionUpdateFunc = hivePartitionUpdateFunc; - } - - public void setIcebergCommitDataFunc(Consumer> icebergCommitDataFunc) { - this.icebergCommitDataFunc = icebergCommitDataFunc; - } - // update job progress from BE public void updateFragmentExecStatus(TReportExecStatusParams params) { PipelineExecContext ctx = pipelineExecContexts.get(Pair.of(params.getFragmentId(), params.getBackendId())); @@ -2444,11 +2424,13 @@ public void updateFragmentExecStatus(TReportExecStatusParams params) { if (params.isSetErrorTabletInfos()) { updateErrorTabletInfos(params.getErrorTabletInfos()); } - if (params.isSetHivePartitionUpdates() && hivePartitionUpdateFunc != null) { - hivePartitionUpdateFunc.accept(params.getHivePartitionUpdates()); + if (params.isSetHivePartitionUpdates()) { + ((HMSTransaction) Env.getCurrentEnv().getGlobalExternalTransactionInfoMgr().getTxnById(txnId)) + .updateHivePartitionUpdates(params.getHivePartitionUpdates()); } - if (params.isSetIcebergCommitDatas() && icebergCommitDataFunc != null) { - icebergCommitDataFunc.accept(params.getIcebergCommitDatas()); + if (params.isSetIcebergCommitDatas()) { + ((IcebergTransaction) Env.getCurrentEnv().getGlobalExternalTransactionInfoMgr().getTxnById(txnId)) + .updateIcebergCommitData(params.getIcebergCommitDatas()); } if (ctx.done) { @@ -2591,13 +2573,6 @@ public BucketShuffleJoinController(Map> fragmentIdT // check whether the node fragment is bucket shuffle join fragment protected boolean isBucketShuffleJoin(int fragmentId, PlanNode node) { - if (ConnectContext.get() != null) { - if (!ConnectContext.get().getSessionVariable().isEnableBucketShuffleJoin() - && !ConnectContext.get().getSessionVariable().isEnableNereidsPlanner()) { - return false; - } - } - // check the node is be the part of the fragment if (fragmentId != node.getFragmentId().asInt()) { return false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java index edc174d9d2f7e4..830797e094eb31 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MasterOpExecutor.java @@ -349,7 +349,9 @@ public static class ForwardToMasterException extends RuntimeException { .put(TTransportException.UNKNOWN, "Unknown exception") .put(TTransportException.NOT_OPEN, "Connection is not open") .put(TTransportException.ALREADY_OPEN, "Connection has already opened up") - .put(TTransportException.TIMED_OUT, "Connection timeout") + .put(TTransportException.TIMED_OUT, + "Connection timeout, please check network state or enlarge session variable:" + + "`query_timeout`/`insert_timeout`") .put(TTransportException.END_OF_FILE, "EOF") .put(TTransportException.CORRUPTED_DATA, "Corrupted data") .build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java index 0f3de945f8508b..f5062ac392660c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/MysqlConnectProcessor.java @@ -182,7 +182,7 @@ private void handleExecute() { } // Process COM_QUERY statement, - private void handleQuery(MysqlCommand mysqlCommand) throws ConnectionException { + private void handleQuery() throws ConnectionException { // convert statement to Java string byte[] bytes = packetBuf.array(); int ending 
= packetBuf.limit() - 1; @@ -191,7 +191,7 @@ private void handleQuery(MysqlCommand mysqlCommand) throws ConnectionException { } String originStmt = new String(bytes, 1, ending, StandardCharsets.UTF_8); - handleQuery(mysqlCommand, originStmt); + handleQuery(originStmt); } private void dispatch() throws IOException { @@ -200,7 +200,7 @@ private void dispatch() throws IOException { if (command == null) { ErrorReport.report(ErrorCode.ERR_UNKNOWN_COM_ERROR); ctx.getState().setError(ErrorCode.ERR_UNKNOWN_COM_ERROR, "Unknown command(" + code + ")"); - LOG.warn("Unknown command(" + code + ")"); + LOG.warn("Unknown command({})", code); return; } if (LOG.isDebugEnabled()) { @@ -219,7 +219,7 @@ private void dispatch() throws IOException { break; case COM_QUERY: case COM_STMT_PREPARE: - handleQuery(command); + handleQuery(); break; case COM_STMT_EXECUTE: handleExecute(); @@ -283,6 +283,8 @@ public void processOnce() throws IOException { finalizeCommand(); ctx.setCommand(MysqlCommand.COM_SLEEP); + ctx.clear(); + executor = null; } public void loop() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java b/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java index 7d501fb5c11615..667d15de1671bf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/QeProcessorImpl.java @@ -244,25 +244,6 @@ public TReportExecStatusResult reportExecStatus(TReportExecStatusParams params, } } - if (params.isSetProfile() || params.isSetLoadChannelProfile()) { - LOG.info("Reporting profile, query_id={}, fragment {} backend num: {}, ip: {}", - DebugUtil.printId(params.query_id), params.getFragmentId(), params.backend_num, beAddr); - if (LOG.isDebugEnabled()) { - LOG.debug("params: {}", params); - } - ExecutionProfile executionProfile = ProfileManager.getInstance().getExecutionProfile(params.query_id); - if (executionProfile != null) { - // Update profile may cost a lot of time, use a seperate pool to deal with it. 
- writeProfileExecutor.submit(new Runnable() { - @Override - public void run() { - executionProfile.updateProfile(params); - } - }); - } else { - LOG.info("Could not find execution profile with query id {}", DebugUtil.printId(params.query_id)); - } - } final TReportExecStatusResult result = new TReportExecStatusResult(); if (params.isSetReportWorkloadRuntimeStatus()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java index 1af6239bf4c19c..e4fb6153e3359c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java @@ -240,6 +240,8 @@ public class SessionVariable implements Serializable, Writable { public static final String MAX_JOIN_NUMBER_BUSHY_TREE = "max_join_number_bushy_tree"; public static final String ENABLE_PARTITION_TOPN = "enable_partition_topn"; + public static final String PARTITION_TOPN_MAX_PARTITIONS = "partition_topn_max_partitions"; + public static final String PARTITION_TOPN_PER_PARTITION_ROWS = "partition_topn_pre_partition_rows"; public static final String GLOBAL_PARTITION_TOPN_THRESHOLD = "global_partition_topn_threshold"; @@ -296,6 +298,8 @@ public class SessionVariable implements Serializable, Writable { public static final String AUTO_BROADCAST_JOIN_THRESHOLD = "auto_broadcast_join_threshold"; + public static final String PARALLEL_PREPARE_THRESHOLD = "parallel_prepare_threshold"; + public static final String ENABLE_PROJECTION = "enable_projection"; public static final String ENABLE_SHORT_CIRCUIT_QUERY = "enable_short_circuit_query"; @@ -696,10 +700,7 @@ public class SessionVariable implements Serializable, Writable { @VariableMgr.VarAttr(name = EXEC_MEM_LIMIT) public long maxExecMemByte = 2147483648L; - @VariableMgr.VarAttr(name = SCAN_QUEUE_MEM_LIMIT, - description = {"每个 Scan Instance 的 block queue 能够保存多少字节的 block", - "How many bytes of block can be saved in the block queue of each Scan Instance"}) - // 100MB + @VariableMgr.VarAttr(name = SCAN_QUEUE_MEM_LIMIT) public long maxScanQueueMemByte = 2147483648L / 20; @VariableMgr.VarAttr(name = NUM_SCANNER_THREADS, needForward = true, description = { @@ -1046,7 +1047,7 @@ public class SessionVariable implements Serializable, Writable { @VariableMgr.VarAttr(name = PARALLEL_SCAN_MIN_ROWS_PER_SCANNER, fuzzy = true, varType = VariableAnnotation.EXPERIMENTAL, needForward = true) - private long parallelScanMinRowsPerScanner = 16384; // 16K + private long parallelScanMinRowsPerScanner = 2097152; // 2M @VariableMgr.VarAttr(name = IGNORE_STORAGE_DATA_DISTRIBUTION, fuzzy = false, varType = VariableAnnotation.EXPERIMENTAL, needForward = true) @@ -1089,6 +1090,9 @@ public class SessionVariable implements Serializable, Writable { @VariableMgr.VarAttr(name = AUTO_BROADCAST_JOIN_THRESHOLD) public double autoBroadcastJoinThreshold = 0.8; + @VariableMgr.VarAttr(name = PARALLEL_PREPARE_THRESHOLD, fuzzy = true) + public int parallelPrepareThreshold = 32; + @VariableMgr.VarAttr(name = ENABLE_COST_BASED_JOIN_REORDER) private boolean enableJoinReorderBasedCost = false; @@ -1238,6 +1242,22 @@ public void setMaxJoinNumberOfReorder(int maxJoinNumberOfReorder) { @VariableMgr.VarAttr(name = ENABLE_PARTITION_TOPN) private boolean enablePartitionTopN = true; + @VariableMgr.VarAttr(name = PARTITION_TOPN_MAX_PARTITIONS, needForward = true, description = { + "这个阈值决定了partition_topn计算时的最大分区数量,超过这个阈值后且输入总行数少于预估总量,剩余的数据将直接透传给下一个算子", + "This threshold determines how many 
partitions will be allocated for window function get topn." + + " if this threshold is exceeded and input rows less than the estimated total rows, the remaining" + + " data will be pass through to other node directly." + }) + private int partitionTopNMaxPartitions = 1024; + + @VariableMgr.VarAttr(name = PARTITION_TOPN_PER_PARTITION_ROWS, needForward = true, description = { + "这个数值用于partition_topn预估每个分区的行数,用来计算所有分区的预估数据总量,决定是否能透传下一个算子", + "This value is used for partition_topn to estimate the number of rows in each partition, to calculate " + + " the estimated total amount of data for all partitions, and to determine whether the next operator " + + " can be passed transparently." + }) + private int partitionTopNPerPartitionRows = 1000; + @VariableMgr.VarAttr(name = GLOBAL_PARTITION_TOPN_THRESHOLD) private double globalPartitionTopNThreshold = 100; @@ -1305,8 +1325,7 @@ public void setEnableLeftZigZag(boolean enableLeftZigZag) { * the new optimizer is fully developed. I hope that day * would be coming soon. */ - @VariableMgr.VarAttr(name = ENABLE_NEREIDS_PLANNER, needForward = true, - fuzzy = true, varType = VariableAnnotation.EXPERIMENTAL_ONLINE) + @VariableMgr.VarAttr(name = ENABLE_NEREIDS_PLANNER, needForward = true, varType = VariableAnnotation.REMOVED) private boolean enableNereidsPlanner = true; @VariableMgr.VarAttr(name = DISABLE_NEREIDS_RULES, needForward = true) @@ -2193,6 +2212,7 @@ public void initFuzzyModeVariables() { Random random = new SecureRandom(); this.parallelExecInstanceNum = random.nextInt(8) + 1; this.parallelPipelineTaskNum = random.nextInt(8); + this.parallelPrepareThreshold = random.nextInt(32) + 1; this.enableCommonExprPushdown = random.nextBoolean(); this.enableLocalExchange = random.nextBoolean(); // This will cause be dead loop, disable it first @@ -2244,8 +2264,6 @@ public void initFuzzyModeVariables() { */ // pull_request_id default value is 0. When it is 0, use default (global) session variable. if (Config.pull_request_id > 0) { - this.enableNereidsPlanner = true; - switch (Config.pull_request_id % 4) { case 0: this.runtimeFilterType |= TRuntimeFilterType.BITMAP.getValue(); @@ -3318,19 +3336,6 @@ public boolean isEnablePushDownStringMinMax() { return enablePushDownStringMinMax; } - /** - * Nereids only support vectorized engine. 
- * - * @return true if both nereids and vectorized engine are enabled - */ - public boolean isEnableNereidsPlanner() { - return enableNereidsPlanner; - } - - public void setEnableNereidsPlanner(boolean enableNereidsPlanner) { - this.enableNereidsPlanner = enableNereidsPlanner; - } - /** canUseNereidsDistributePlanner */ public static boolean canUseNereidsDistributePlanner() { // TODO: support cloud mode @@ -3426,7 +3431,7 @@ public void setNereidsCboPenaltyFactor(double penaltyFactor) { } public boolean isEnableNereidsTrace() { - return isEnableNereidsPlanner() && enableNereidsTrace; + return enableNereidsTrace; } public void setEnableExprTrace(boolean enableExprTrace) { @@ -3645,6 +3650,7 @@ public TQueryOptions toThrift() { tResult.setNumScannerThreads(numScannerThreads); tResult.setScannerScaleUpRatio(scannerScaleUpRatio); tResult.setMaxColumnReaderNum(maxColumnReaderNum); + tResult.setParallelPrepareThreshold(parallelPrepareThreshold); // TODO chenhao, reservation will be calculated by cost tResult.setMinReservation(0); @@ -3662,16 +3668,19 @@ public TQueryOptions toThrift() { } tResult.setCodegenLevel(codegenLevel); tResult.setBeExecVersion(Config.be_exec_version); - tResult.setEnableLocalShuffle(enableLocalShuffle && enableNereidsPlanner); + tResult.setEnableLocalShuffle(enableLocalShuffle); tResult.setParallelInstance(getParallelExecInstanceNum()); tResult.setReturnObjectDataAsBinary(returnObjectDataAsBinary); tResult.setTrimTailingSpacesForExternalTableQuery(trimTailingSpacesForExternalTableQuery); tResult.setEnableShareHashTableForBroadcastJoin(enableShareHashTableForBroadcastJoin); tResult.setEnableHashJoinEarlyStartProbe(enableHashJoinEarlyStartProbe); + tResult.setEnableScanNodeRunSerial(enableScanRunSerial); tResult.setBatchSize(batchSize); tResult.setDisableStreamPreaggregations(disableStreamPreaggregations); tResult.setEnableDistinctStreamingAggregation(enableDistinctStreamingAggregation); + tResult.setPartitionTopnMaxPartitions(partitionTopNMaxPartitions); + tResult.setPartitionTopnPrePartitionRows(partitionTopNPerPartitionRows); if (maxScanKeyNum > 0) { tResult.setMaxScanKeyNum(maxScanKeyNum); @@ -3698,6 +3707,7 @@ public TQueryOptions toThrift() { tResult.setResourceLimit(resourceLimit); } + tResult.setWaitFullBlockScheduleTimes(waitFullBlockScheduleTimes); tResult.setEnableFunctionPushdown(enableFunctionPushdown); tResult.setEnableCommonExprPushdown(enableCommonExprPushdown); tResult.setCheckOverflowForDecimal(checkOverflowForDecimal); @@ -4071,18 +4081,7 @@ public void disableConstantFoldingByBEOnce() throws DdlException { new SetVar(SessionVariable.ENABLE_FOLD_CONSTANT_BY_BE, new StringLiteral("false"))); } - public void disableNereidsPlannerOnce() throws DdlException { - if (!enableNereidsPlanner) { - return; - } - setIsSingleSetVar(true); - VariableMgr.setVar(this, new SetVar(SessionVariable.ENABLE_NEREIDS_PLANNER, new StringLiteral("false"))); - } - public void disableNereidsJoinReorderOnce() throws DdlException { - if (!enableNereidsPlanner) { - return; - } setIsSingleSetVar(true); VariableMgr.setVar(this, new SetVar(SessionVariable.DISABLE_JOIN_REORDER, new StringLiteral("true"))); } @@ -4314,7 +4313,7 @@ public int getCreateTablePartitionMaxNum() { } public boolean isIgnoreStorageDataDistribution() { - return ignoreStorageDataDistribution && enableLocalShuffle && enableNereidsPlanner; + return ignoreStorageDataDistribution && enableLocalShuffle; } public void setIgnoreStorageDataDistribution(boolean ignoreStorageDataDistribution) { @@ -4352,7 +4351,7 @@ public 
boolean isEnableCountPushDownForExternalTable() { } public boolean isForceToLocalShuffle() { - return enableLocalShuffle && enableNereidsPlanner && forceToLocalShuffle; + return enableLocalShuffle && forceToLocalShuffle; } public void setForceToLocalShuffle(boolean forceToLocalShuffle) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index c2170f90d5716e..3f830f977912ba 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -823,8 +823,12 @@ private void handleShowCluster() throws AnalysisException { PrivPredicate.of(PrivBitSet.of(Privilege.ADMIN_PRIV), Operator.OR))) { users.removeIf(user -> !user.equals(ClusterNamespace.getNameFromFullName(ctx.getQualifiedUser()))); } + String result = Joiner.on(", ").join(users); row.add(result); + int backendNum = ((CloudSystemInfoService) Env.getCurrentEnv().getCurrentSystemInfo()) + .getBackendsByClusterName(clusterName).size(); + row.add(String.valueOf(backendNum)); rows.add(row); } @@ -1722,6 +1726,8 @@ private void handleShowRoutineLoad() throws AnalysisException { + " in db " + showRoutineLoadStmt.getDbFullName() + ". Include history? " + showRoutineLoadStmt.isIncludeHistory()); } + // sort by create time + rows.sort(Comparator.comparing(x -> x.get(2))); resultSet = new ShowResultSet(showRoutineLoadStmt.getMetaData(), rows); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java index c22478710173fd..7018b2a338ffa3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java @@ -141,7 +141,6 @@ import org.apache.doris.nereids.NereidsPlanner; import org.apache.doris.nereids.PlanProcess; import org.apache.doris.nereids.StatementContext; -import org.apache.doris.nereids.exceptions.DoNotFallbackException; import org.apache.doris.nereids.exceptions.MustFallbackException; import org.apache.doris.nereids.exceptions.ParseException; import org.apache.doris.nereids.glue.LogicalPlanAdapter; @@ -154,7 +153,6 @@ import org.apache.doris.nereids.trees.plans.commands.DeleteFromCommand; import org.apache.doris.nereids.trees.plans.commands.DeleteFromUsingCommand; import org.apache.doris.nereids.trees.plans.commands.Forward; -import org.apache.doris.nereids.trees.plans.commands.NotAllowFallback; import org.apache.doris.nereids.trees.plans.commands.PrepareCommand; import org.apache.doris.nereids.trees.plans.commands.UnsupportedCommand; import org.apache.doris.nereids.trees.plans.commands.UpdateCommand; @@ -401,7 +399,7 @@ private Map getSummaryInfo(boolean isFinished) { builder.defaultCatalog(context.getCurrentCatalog().getName()); builder.defaultDb(context.getDatabase()); builder.workloadGroup(context.getWorkloadGroupName()); - builder.sqlStatement(originStmt.originStmt); + builder.sqlStatement(originStmt == null ? "" : originStmt.originStmt); builder.isCached(isCached ? "Yes" : "No"); Map beToInstancesNum = coord == null ? 
Maps.newTreeMap() : coord.getBeToInstancesNum(); @@ -582,17 +580,6 @@ public void queryRetry(TUniqueId queryId) throws Exception { } } - public boolean notAllowFallback(NereidsException e) { - if (e.getException() instanceof DoNotFallbackException) { - return true; - } - if (parsedStmt instanceof LogicalPlanAdapter) { - LogicalPlan logicalPlan = ((LogicalPlanAdapter) parsedStmt).getLogicalPlan(); - return logicalPlan instanceof NotAllowFallback; - } - return false; - } - public void execute(TUniqueId queryId) throws Exception { SessionVariable sessionVariable = context.getSessionVariable(); if (context.getConnectType() == ConnectType.ARROW_FLIGHT_SQL) { @@ -600,43 +587,37 @@ public void execute(TUniqueId queryId) throws Exception { } try { - if (parsedStmt instanceof LogicalPlanAdapter - || (parsedStmt == null && sessionVariable.isEnableNereidsPlanner())) { - try { - executeByNereids(queryId); - } catch (NereidsException | ParseException e) { - if (context.getMinidump() != null && context.getMinidump().toString(4) != null) { - MinidumpUtils.saveMinidumpString(context.getMinidump(), DebugUtil.printId(context.queryId())); - } - // try to fall back to legacy planner - if (LOG.isDebugEnabled()) { - LOG.debug("nereids cannot process statement\n{}\n because of {}", - originStmt.originStmt, e.getMessage(), e); - } - if (e instanceof NereidsException && notAllowFallback((NereidsException) e)) { - LOG.warn("Analyze failed. {}", context.getQueryIdentifier(), e); - throw new AnalysisException(e.getMessage()); - } - if (e instanceof NereidsException - && !(((NereidsException) e).getException() instanceof MustFallbackException)) { - LOG.warn("Analyze failed. {}", context.getQueryIdentifier(), e); - context.getState().setError(e.getMessage()); - return; - } - if (LOG.isDebugEnabled()) { - LOG.debug("fall back to legacy planner on statement:\n{}", originStmt.originStmt); - } - parsedStmt = null; - planner = null; - // Attention: currently exception from nereids does not mean an Exception to user terminal - // unless user does not allow fallback to lagency planner. But state of query - // has already been set to Error in this case, it will have some side effect on profile result - // and audit log. So we need to reset state to OK if query cancel be processd by lagency. - context.getState().reset(); - context.getState().setNereids(false); - executeByLegacy(queryId); + try { + executeByNereids(queryId); + } catch (NereidsException | ParseException e) { + if (context.getMinidump() != null && context.getMinidump().toString(4) != null) { + MinidumpUtils.saveMinidumpString(context.getMinidump(), DebugUtil.printId(context.queryId())); } - } else { + // try to fall back to legacy planner + if (LOG.isDebugEnabled()) { + LOG.debug("nereids cannot process statement\n{}\n because of {}", + originStmt.originStmt, e.getMessage(), e); + } + // only must fall back + unsupported command could use legacy planner + if ((e instanceof NereidsException + && !(((NereidsException) e).getException() instanceof MustFallbackException)) + || !((parsedStmt instanceof LogicalPlanAdapter + && ((LogicalPlanAdapter) parsedStmt).getLogicalPlan() instanceof Command))) { + LOG.warn("Analyze failed. 
{}", context.getQueryIdentifier(), e); + context.getState().setError(e.getMessage()); + return; + } + if (LOG.isDebugEnabled()) { + LOG.debug("fall back to legacy planner on statement:\n{}", originStmt.originStmt); + } + parsedStmt = null; + planner = null; + // Attention: currently exception from nereids does not mean an Exception to user terminal + // unless user does not allow fallback to lagency planner. But state of query + // has already been set to Error in this case, it will have some side effect on profile result + // and audit log. So we need to reset state to OK if query cancel be processd by lagency. + context.getState().reset(); + context.getState().setNereids(false); executeByLegacy(queryId); } } finally { @@ -690,6 +671,8 @@ private void executeByNereids(TUniqueId queryId) throws Exception { context.setQueryId(queryId); context.setStartTime(); profile.getSummaryProfile().setQueryBeginTime(); + List> changedSessionVar = VariableMgr.dumpChangedVars(context.getSessionVariable()); + profile.setChangedSessionVar(DebugUtil.prettyPrintChangedSessionVar(changedSessionVar)); context.setStmtId(STMT_ID_GENERATOR.incrementAndGet()); parseByNereids(); @@ -753,7 +736,7 @@ private void executeByNereids(TUniqueId queryId) throws Exception { syncJournalIfNeeded(); try { ((Command) logicalPlan).run(context, this); - } catch (MustFallbackException | DoNotFallbackException e) { + } catch (MustFallbackException e) { if (LOG.isDebugEnabled()) { LOG.debug("Command({}) process failed.", originStmt.originStmt, e); } @@ -823,7 +806,7 @@ private void executeByNereids(TUniqueId queryId) throws Exception { try { planner.plan(parsedStmt, context.getSessionVariable().toThrift()); checkBlockRules(); - } catch (MustFallbackException | DoNotFallbackException e) { + } catch (MustFallbackException e) { LOG.warn("Nereids plan query failed:\n{}", originStmt.originStmt, e); throw new NereidsException("Command(" + originStmt.originStmt + ") process failed.", e); } catch (Exception e) { @@ -845,11 +828,16 @@ private void parseByNereids() { } catch (Exception e) { throw new ParseException("Nereids parse failed. " + e.getMessage()); } - if (statements.size() <= originStmt.idx) { - throw new ParseException("Nereids parse failed. Parser get " + statements.size() + " statements," - + " but we need at least " + originStmt.idx + " statements."); + if (statements.isEmpty()) { + // for test only + parsedStmt = new LogicalPlanAdapter(new UnsupportedCommand(), new StatementContext()); + } else { + if (statements.size() <= originStmt.idx) { + throw new ParseException("Nereids parse failed. 
Parser get " + statements.size() + " statements," + + " but we need at least " + originStmt.idx + " statements."); + } + parsedStmt = statements.get(originStmt.idx); } - parsedStmt = statements.get(originStmt.idx); } public void finalizeQuery() { @@ -1542,7 +1530,7 @@ private void resetAnalyzerAndStmt() { } // Because this is called by other thread - public void cancel() { + public void cancel(Status cancelReason) { if (masterOpExecutor != null) { try { masterOpExecutor.cancel(); @@ -1551,9 +1539,14 @@ public void cancel() { } return; } + Optional insertOverwriteTableCommand = getInsertOverwriteTableCommand(); + if (insertOverwriteTableCommand.isPresent()) { + // If the be scheduling has not been triggered yet, cancel the scheduling first + insertOverwriteTableCommand.get().cancel(); + } Coordinator coordRef = coord; if (coordRef != null) { - coordRef.cancel(); + coordRef.cancel(cancelReason); } if (mysqlLoadId != null) { Env.getCurrentEnv().getLoadManager().getMysqlLoadManager().cancelMySqlLoad(mysqlLoadId); @@ -1561,20 +1554,22 @@ public void cancel() { if (parsedStmt instanceof AnalyzeTblStmt || parsedStmt instanceof AnalyzeDBStmt) { Env.getCurrentEnv().getAnalysisManager().cancelSyncTask(context); } + if (insertOverwriteTableCommand.isPresent()) { + // Wait for the command to run or cancel completion + insertOverwriteTableCommand.get().waitNotRunning(); + } } - // Because this is called by other thread - public void cancel(Status cancelReason) { - Coordinator coordRef = coord; - if (coordRef != null) { - coordRef.cancel(cancelReason); - } - if (mysqlLoadId != null) { - Env.getCurrentEnv().getLoadManager().getMysqlLoadManager().cancelMySqlLoad(mysqlLoadId); - } - if (parsedStmt instanceof AnalyzeTblStmt || parsedStmt instanceof AnalyzeDBStmt) { - Env.getCurrentEnv().getAnalysisManager().cancelSyncTask(context); + private Optional getInsertOverwriteTableCommand() { + if (parsedStmt instanceof LogicalPlanAdapter) { + LogicalPlanAdapter logicalPlanAdapter = (LogicalPlanAdapter) parsedStmt; + LogicalPlan logicalPlan = logicalPlanAdapter.getLogicalPlan(); + if (logicalPlan instanceof InsertOverwriteTableCommand) { + InsertOverwriteTableCommand insertOverwriteTableCommand = (InsertOverwriteTableCommand) logicalPlan; + return Optional.of(insertOverwriteTableCommand); + } } + return Optional.empty(); } // Handle kill statement. 
@@ -3313,7 +3308,7 @@ public StatementBase setParsedStmt(StatementBase parsedStmt) { public List executeInternalQuery() { if (LOG.isDebugEnabled()) { - LOG.debug("INTERNAL QUERY: " + originStmt.toString()); + LOG.debug("INTERNAL QUERY: {}", originStmt.toString()); } UUID uuid = UUID.randomUUID(); TUniqueId queryId = new TUniqueId(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); @@ -3321,29 +3316,14 @@ public List executeInternalQuery() { try { List resultRows = new ArrayList<>(); try { - if (ConnectContext.get() != null - && ConnectContext.get().getSessionVariable().isEnableNereidsPlanner()) { - try { - parseByNereids(); - Preconditions.checkState(parsedStmt instanceof LogicalPlanAdapter, - "Nereids only process LogicalPlanAdapter," - + " but parsedStmt is " + parsedStmt.getClass().getName()); - context.getState().setNereids(true); - context.getState().setIsQuery(true); - planner = new NereidsPlanner(statementContext); - planner.plan(parsedStmt, context.getSessionVariable().toThrift()); - } catch (Exception e) { - LOG.warn("Fall back to legacy planner, because: {}", e.getMessage(), e); - parsedStmt = null; - planner = null; - context.getState().setNereids(false); - analyzer = new Analyzer(context.getEnv(), context); - analyze(context.getSessionVariable().toThrift()); - } - } else { - analyzer = new Analyzer(context.getEnv(), context); - analyze(context.getSessionVariable().toThrift()); - } + parseByNereids(); + Preconditions.checkState(parsedStmt instanceof LogicalPlanAdapter, + "Nereids only process LogicalPlanAdapter," + + " but parsedStmt is " + parsedStmt.getClass().getName()); + context.getState().setNereids(true); + context.getState().setIsQuery(true); + planner = new NereidsPlanner(statementContext); + planner.plan(parsedStmt, context.getSessionVariable().toThrift()); } catch (Exception e) { LOG.warn("Failed to run internal SQL: {}", originStmt, e); throw new RuntimeException("Failed to execute internal SQL. " + Util.getRootCauseMessage(e), e); @@ -3526,44 +3506,25 @@ public HttpStreamParams generateHttpStreamPlan(TUniqueId queryId) throws Excepti SessionVariable sessionVariable = context.getSessionVariable(); HttpStreamParams httpStreamParams = null; try { - if (sessionVariable.isEnableNereidsPlanner()) { - try { - // disable shuffle for http stream (only 1 sink) - sessionVariable.disableStrictConsistencyDmlOnce(); - httpStreamParams = generateHttpStreamNereidsPlan(queryId); - } catch (NereidsException | ParseException e) { - if (context.getMinidump() != null && context.getMinidump().toString(4) != null) { - MinidumpUtils.saveMinidumpString(context.getMinidump(), DebugUtil.printId(context.queryId())); - } - // try to fall back to legacy planner - if (LOG.isDebugEnabled()) { - LOG.debug("nereids cannot process statement\n{}\n because of {}", - originStmt.originStmt, e.getMessage(), e); - } - if (e instanceof NereidsException && notAllowFallback((NereidsException) e)) { - LOG.warn("Analyze failed. {}", context.getQueryIdentifier(), e); - throw ((NereidsException) e).getException(); - } - if (e instanceof NereidsException - && !(((NereidsException) e).getException() instanceof MustFallbackException)) { - LOG.warn("Analyze failed. 
{}", context.getQueryIdentifier(), e); - throw ((NereidsException) e).getException(); - } - if (LOG.isDebugEnabled()) { - LOG.debug("fall back to legacy planner on statement:\n{}", originStmt.originStmt); - } - // Attention: currently exception from nereids does not mean an Exception to user terminal - // unless user does not allow fallback to lagency planner. But state of query - // has already been set to Error in this case, it will have some side effect on profile result - // and audit log. So we need to reset state to OK if query cancel be processd by lagency. - context.getState().reset(); - context.getState().setNereids(false); - httpStreamParams = generateHttpStreamLegacyPlan(queryId); - } catch (Exception e) { - throw new RuntimeException(e); + try { + // disable shuffle for http stream (only 1 sink) + sessionVariable.disableStrictConsistencyDmlOnce(); + httpStreamParams = generateHttpStreamNereidsPlan(queryId); + } catch (NereidsException | ParseException e) { + if (context.getMinidump() != null && context.getMinidump().toString(4) != null) { + MinidumpUtils.saveMinidumpString(context.getMinidump(), DebugUtil.printId(context.queryId())); + } + // try to fall back to legacy planner + if (LOG.isDebugEnabled()) { + LOG.debug("nereids cannot process statement\n{}\n because of {}", + originStmt.originStmt, e.getMessage(), e); } - } else { - httpStreamParams = generateHttpStreamLegacyPlan(queryId); + if (e instanceof NereidsException) { + LOG.warn("Analyze failed. {}", context.getQueryIdentifier(), e); + throw ((NereidsException) e).getException(); + } + } catch (Exception e) { + throw new RuntimeException(e); } } finally { // revert Session Value diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java b/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java index 1b1a317f5f0bb5..8bb4210c605607 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/VariableMgr.java @@ -800,6 +800,42 @@ public int compare(List o1, List o2) { return changedRows; } + public static List> dumpChangedVars(SessionVariable sessionVar) { + // Hold the read lock when session dump, because this option need to access global variable. 
+ rlock.lock(); + List> changedRows = Lists.newArrayList(); + try { + for (Map.Entry entry : ctxByDisplayVarName.entrySet()) { + VarContext ctx = entry.getValue(); + List row = Lists.newArrayList(); + String varName = entry.getKey(); + String curValue = getValue(sessionVar, ctx.getField()); + String defaultValue = ctx.getDefaultValue(); + if (VariableVarConverters.hasConverter(varName)) { + try { + defaultValue = VariableVarConverters.decode(varName, Long.valueOf(defaultValue)); + curValue = VariableVarConverters.decode(varName, Long.valueOf(curValue)); + } catch (DdlException e) { + LOG.warn("Decode session variable {} failed, reason: {}", varName, e.getMessage()); + } + } + + if (curValue.equals(defaultValue)) { + continue; + } + + row.add(varName); + row.add(curValue); + row.add(defaultValue); + changedRows.add(row); + } + } finally { + rlock.unlock(); + } + + return changedRows; + } + @Retention(RetentionPolicy.RUNTIME) public @interface VarAttr { // Name in show variables and set statement; diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/Tag.java b/fe/fe-core/src/main/java/org/apache/doris/resource/Tag.java index b88cf84282f9e2..7d6a18829cf3a4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/resource/Tag.java +++ b/fe/fe-core/src/main/java/org/apache/doris/resource/Tag.java @@ -73,7 +73,9 @@ public class Tag implements Writable { public static final String CLOUD_CLUSTER_PRIVATE_ENDPOINT = "cloud_cluster_private_endpoint"; public static final String CLOUD_CLUSTER_STATUS = "cloud_cluster_status"; - public static final String VALUE_DEFAULT_CLOUD_CLUSTER_NAME = "default_cluster"; + public static final String COMPUTE_GROUP_NAME = "compute_group_name"; + + public static final String VALUE_DEFAULT_COMPUTE_GROUP_NAME = "default_compute_group"; public static final String WORKLOAD_GROUP = "workload_group"; diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadschedpolicy/WorkloadActionCancelQuery.java b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadschedpolicy/WorkloadActionCancelQuery.java index 2dcff6075f4d74..268ccc8a5f6b50 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadschedpolicy/WorkloadActionCancelQuery.java +++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadschedpolicy/WorkloadActionCancelQuery.java @@ -17,7 +17,9 @@ package org.apache.doris.resource.workloadschedpolicy; +import org.apache.doris.common.Status; import org.apache.doris.qe.QeProcessorImpl; +import org.apache.doris.thrift.TStatusCode; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -32,7 +34,7 @@ public void exec(WorkloadQueryInfo queryInfo) { && queryInfo.tUniqueId != null && QeProcessorImpl.INSTANCE.getCoordinator(queryInfo.tUniqueId) != null) { LOG.info("cancel query {} triggered by query schedule policy.", queryInfo.queryId); - queryInfo.context.cancelQuery(); + queryInfo.context.cancelQuery(new Status(TStatusCode.CANCELLED, "cancel query by workload policy")); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index 6c0077ee311faf..7f36ac376bdb09 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -61,6 +61,7 @@ import org.apache.doris.common.Pair; import org.apache.doris.common.PatternMatcher; import org.apache.doris.common.PatternMatcherException; +import 
org.apache.doris.common.Status; import org.apache.doris.common.ThriftServerContext; import org.apache.doris.common.ThriftServerEventProcessor; import org.apache.doris.common.UserException; @@ -1065,7 +1066,7 @@ public TMasterOpResult forward(TMasterOpRequest params) throws TException { TUniqueId queryId = params.getQueryId(); ConnectContext ctx = proxyQueryIdToConnCtx.get(queryId); if (ctx != null) { - ctx.cancelQuery(); + ctx.cancelQuery(new Status(TStatusCode.CANCELLED, "cancel query by forward request.")); } final TMasterOpResult result = new TMasterOpResult(); result.setStatusCode(0); @@ -1783,13 +1784,8 @@ private boolean commitTxnImpl(TCommitTxnRequest request) throws UserException { throw new UserException("transaction [" + request.getTxnId() + "] not found"); } List tableIdList = transactionState.getTableIdList(); - List
    tableList = new ArrayList<>(); - List<String> tables = new ArrayList<>(); // if table was dropped, transaction must be aborted - tableList = db.getTablesOnIdOrderOrThrowException(tableIdList); - for (Table table : tableList) { - tables.add(table.getName()); - } + List<Table>
    tableList = db.getTablesOnIdOrderOrThrowException(tableIdList); // Step 3: check auth if (request.isSetAuthCode()) { @@ -1797,6 +1793,7 @@ private boolean commitTxnImpl(TCommitTxnRequest request) throws UserException { } else if (request.isSetToken()) { checkToken(request.getToken()); } else { + List tables = tableList.stream().map(Table::getName).collect(Collectors.toList()); checkPasswordAndPrivs(request.getUser(), request.getPasswd(), request.getDb(), tables, request.getUserIp(), PrivPredicate.LOAD); } @@ -1991,12 +1988,7 @@ private void rollbackTxnImpl(TRollbackTxnRequest request) throws UserException { throw new UserException("transaction [" + request.getTxnId() + "] not found"); } List tableIdList = transactionState.getTableIdList(); - List
    tableList = new ArrayList<>(); - List<String> tables = new ArrayList<>(); - tableList = db.getTablesOnIdOrderOrThrowException(tableIdList); - for (Table table : tableList) { - tables.add(table.getName()); - } + List<Table>
    tableList = db.getTablesOnIdOrderOrThrowException(tableIdList); // Step 3: check auth if (request.isSetAuthCode()) { @@ -2004,6 +1996,7 @@ private void rollbackTxnImpl(TRollbackTxnRequest request) throws UserException { } else if (request.isSetToken()) { checkToken(request.getToken()); } else { + List tables = tableList.stream().map(Table::getName).collect(Collectors.toList()); checkPasswordAndPrivs(request.getUser(), request.getPasswd(), request.getDb(), tables, request.getUserIp(), PrivPredicate.LOAD); } @@ -2103,7 +2096,7 @@ public TStreamLoadMultiTablePutResult streamLoadMultiTablePut(TStreamLoadPutRequ try { RoutineLoadJob routineLoadJob = Env.getCurrentEnv().getRoutineLoadManager() .getRoutineLoadJobByMultiLoadTaskTxnId(request.getTxnId()); - routineLoadJob.updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.CANNOT_RESUME_ERR, + routineLoadJob.updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.INTERNAL_ERR, "failed to get stream load plan, " + exception.getMessage()), false); } catch (Throwable e) { LOG.warn("catch update routine load job error.", e); @@ -3314,17 +3307,11 @@ public TStatus invalidateStatsCache(TInvalidateFollowerStatsCacheRequest request InvalidateStatsTarget target = GsonUtils.GSON.fromJson(request.key, InvalidateStatsTarget.class); AnalysisManager analysisManager = Env.getCurrentEnv().getAnalysisManager(); TableStatsMeta tableStats = analysisManager.findTableStatsStatus(target.tableId); - if (tableStats == null) { - return new TStatus(TStatusCode.OK); - } PartitionNames partitionNames = null; if (target.partitions != null) { partitionNames = new PartitionNames(false, new ArrayList<>(target.partitions)); } if (target.isTruncate) { - if (partitionNames == null || partitionNames.isStar() || partitionNames.getPartitionNames() == null) { - tableStats.clearIndexesRowCount(); - } analysisManager.submitAsyncDropStatsTask(target.catalogId, target.dbId, target.tableId, tableStats, partitionNames); } else { diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java index f83c67d2daf17c..fe0648a0680ca1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/FlightSqlConnectProcessor.java @@ -87,7 +87,7 @@ public void handleQuery(String query) throws ConnectionException { prepare(command); ctx.setRunningQuery(query); - handleQuery(command, query); + super.handleQuery(query); } // TODO diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/sessions/FlightSqlConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/sessions/FlightSqlConnectContext.java index 9f703dff92b1a9..4badae03b3141e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/sessions/FlightSqlConnectContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/arrowflight/sessions/FlightSqlConnectContext.java @@ -17,11 +17,13 @@ package org.apache.doris.service.arrowflight.sessions; +import org.apache.doris.common.Status; import org.apache.doris.mysql.MysqlChannel; import org.apache.doris.qe.ConnectContext; import org.apache.doris.qe.ConnectProcessor; import org.apache.doris.service.arrowflight.results.FlightSqlChannel; import org.apache.doris.thrift.TResultSinkType; +import org.apache.doris.thrift.TStatusCode; import org.apache.logging.log4j.LogManager; 
import org.apache.logging.log4j.Logger; @@ -74,7 +76,7 @@ public void kill(boolean killConnection) { connectScheduler.unregisterConnection(this); } // Now, cancel running query. - cancelQuery(); + cancelQuery(new Status(TStatusCode.CANCELLED, "arrow flight query killed by user")); } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisInfoBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisInfoBuilder.java index 43f592629bd47f..73817363ef164f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisInfoBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisInfoBuilder.java @@ -61,7 +61,7 @@ public class AnalysisInfoBuilder { private boolean usingSqlForExternalTable; private long tblUpdateTime; private long rowCount; - private boolean userInject; + private boolean userInject = false; private long updateRows; private JobPriority priority; private Map partitionUpdateRows; diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java index 634f1074cd5658..c2b20707f133e8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisManager.java @@ -59,6 +59,7 @@ import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.SlotReference; import org.apache.doris.persist.AnalyzeDeletionLog; +import org.apache.doris.persist.TableStatsDeletionLog; import org.apache.doris.persist.gson.GsonUtils; import org.apache.doris.qe.ConnectContext; import org.apache.doris.qe.ShowResultSet; @@ -655,14 +656,21 @@ public void dropStats(DropStatsStmt dropStatsStmt) throws DdlException { return; } + TableStatsMeta tableStats = findTableStatsStatus(dropStatsStmt.getTblId()); + if (tableStats == null) { + return; + } Set cols = dropStatsStmt.getColumnNames(); PartitionNames partitionNames = dropStatsStmt.getPartitionNames(); long catalogId = dropStatsStmt.getCatalogIdId(); long dbId = dropStatsStmt.getDbId(); long tblId = dropStatsStmt.getTblId(); - TableStatsMeta tableStats = findTableStatsStatus(dropStatsStmt.getTblId()); - if (tableStats == null) { - return; + TableIf table = StatisticsUtil.findTable(catalogId, dbId, tblId); + // Remove tableMetaStats if drop whole table stats. + if (cols == null && (!table.isPartitionedTable() || partitionNames == null + || partitionNames.isStar() || partitionNames.getPartitionNames() == null)) { + removeTableStats(tblId); + Env.getCurrentEnv().getEditLog().logDeleteTableStats(new TableStatsDeletionLog(tblId)); } invalidateLocalStats(catalogId, dbId, tblId, cols, tableStats, partitionNames); // Drop stats ddl is master only operation. 
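The dropStats changes in the hunk above and in the next hunk both turn on a single question: does the statement drop statistics for the whole table? Only in that case is the TableStatsMeta removed and a TableStatsDeletionLog entry written to the edit log; anything narrower only invalidates the named columns or partitions. A minimal sketch of that predicate, for reference only: the helper name (isWholeTableDrop) and the wrapper class are hypothetical, and the import paths are assumed from the surrounding code rather than taken from the patch.

import java.util.Set;

import org.apache.doris.analysis.PartitionNames;
import org.apache.doris.catalog.TableIf;

// Sketch: mirrors the whole-table-drop condition used by AnalysisManager.dropStats().
final class DropStatsScope {
    // True when no explicit column list is given and the partition spec does not narrow
    // the target (unpartitioned table, no partition clause, "*" partitions, or a null name list).
    static boolean isWholeTableDrop(Set<String> cols, TableIf table, PartitionNames partitionNames) {
        return cols == null
                && (!table.isPartitionedTable()
                        || partitionNames == null
                        || partitionNames.isStar()
                        || partitionNames.getPartitionNames() == null);
    }

    private DropStatsScope() {
    }
}

Removing the whole TableStatsMeta is presumably also why the earlier clearIndexesRowCount() calls are dropped in these hunks: per-index cleanup becomes redundant once the entire meta entry is deleted and logged.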
@@ -674,26 +682,32 @@ public void dropStats(DropStatsStmt dropStatsStmt) throws DdlException { StatisticsRepository.dropStatistics(catalogId, dbId, tblId, cols, partitions); } - public void dropStats(TableIf table, PartitionNames partitionNames) throws DdlException { - TableStatsMeta tableStats = findTableStatsStatus(table.getId()); - if (tableStats == null) { - return; - } - long catalogId = table.getDatabase().getCatalog().getId(); - long dbId = table.getDatabase().getId(); - long tableId = table.getId(); - if (partitionNames == null || partitionNames.isStar() || partitionNames.getPartitionNames() == null) { - tableStats.clearIndexesRowCount(); - } - submitAsyncDropStatsTask(catalogId, dbId, tableId, tableStats, partitionNames); - // Drop stats ddl is master only operation. - Set partitions = null; - if (partitionNames != null && !partitionNames.isStar() && partitionNames.getPartitionNames() != null) { - partitions = new HashSet<>(partitionNames.getPartitionNames()); + public void dropStats(TableIf table, PartitionNames partitionNames) { + try { + TableStatsMeta tableStats = findTableStatsStatus(table.getId()); + if (tableStats == null) { + return; + } + long catalogId = table.getDatabase().getCatalog().getId(); + long dbId = table.getDatabase().getId(); + long tableId = table.getId(); + if (!table.isPartitionedTable() || partitionNames == null + || partitionNames.isStar() || partitionNames.getPartitionNames() == null) { + removeTableStats(tableId); + Env.getCurrentEnv().getEditLog().logDeleteTableStats(new TableStatsDeletionLog(tableId)); + } + submitAsyncDropStatsTask(catalogId, dbId, tableId, tableStats, partitionNames); + // Drop stats ddl is master only operation. + Set partitions = null; + if (partitionNames != null && !partitionNames.isStar() && partitionNames.getPartitionNames() != null) { + partitions = new HashSet<>(partitionNames.getPartitionNames()); + } + // Drop stats ddl is master only operation. + invalidateRemoteStats(catalogId, dbId, tableId, null, partitions, true); + StatisticsRepository.dropStatistics(catalogId, dbId, table.getId(), null, partitions); + } catch (Throwable e) { + LOG.warn("Failed to drop stats for table {}", table.getName(), e); } - // Drop stats ddl is master only operation. - invalidateRemoteStats(catalogId, dbId, tableId, null, partitions, true); - StatisticsRepository.dropStatistics(catalogId, dbId, table.getId(), null, partitions); } class DropStatsTask implements Runnable { @@ -753,14 +767,9 @@ public void dropCachedStats(long catalogId, long dbId, long tableId) { public void invalidateLocalStats(long catalogId, long dbId, long tableId, Set columns, TableStatsMeta tableStats, PartitionNames partitionNames) { - if (tableStats == null) { - return; - } TableIf table = StatisticsUtil.findTable(catalogId, dbId, tableId); StatisticsCache statsCache = Env.getCurrentEnv().getStatisticsCache(); - boolean allColumn = false; if (columns == null) { - allColumn = true; columns = table.getSchemaAllIndexes(false) .stream().map(Column::getName).collect(Collectors.toSet()); } @@ -795,11 +804,16 @@ public void invalidateLocalStats(long catalogId, long dbId, long tableId, Set UNSUPPORTED_TYPE = Sets.newHashSet( Type.HLL, Type.BITMAP, Type.ARRAY, Type.STRUCT, Type.MAP, Type.QUANTILE_STATE, Type.JSONB, Type.VARIANT, Type.TIME, Type.TIMEV2, Type.LAMBDA_FUNCTION ); + // ATTENTION: Stats deriving WILL NOT use 'count' field any longer. + // Use 'rowCount' field in Statistics if needed. 
@SerializedName("count") public final double count; @SerializedName("ndv") @@ -122,9 +120,8 @@ public static ColumnStatistic fromResultRow(List resultRows) { // TODO: use thrift public static ColumnStatistic fromResultRow(ResultRow row) { - ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(); double count = Double.parseDouble(row.get(7)); - columnStatisticBuilder.setCount(count); + ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(count); double ndv = Double.parseDouble(row.getWithDefault(8, "0")); columnStatisticBuilder.setNdv(ndv); String nullCount = row.getWithDefault(9, "0"); @@ -190,26 +187,6 @@ public static boolean isAlmostUnique(double ndv, double rowCount) { return rowCount * ALMOST_UNIQUE_FACTOR < ndv; } - public ColumnStatistic updateByLimit(long limit, double rowCount) { - double ratio = 0; - if (rowCount != 0) { - ratio = limit / rowCount; - } - double newNdv = Math.ceil(Math.min(ndv, limit)); - return new ColumnStatisticBuilder() - .setCount(Math.ceil(limit)) - .setNdv(newNdv) - .setAvgSizeByte(Math.ceil(avgSizeByte)) - .setNumNulls(Math.ceil(numNulls * ratio)) - .setDataSize(Math.ceil(dataSize * ratio)) - .setMinValue(minValue) - .setMaxValue(maxValue) - .setMinExpr(minExpr) - .setMaxExpr(maxExpr) - .setIsUnknown(isUnKnown) - .build(); - } - public boolean hasIntersect(ColumnStatistic other) { return Math.max(this.minValue, other.minValue) <= Math.min(this.maxValue, other.maxValue); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticBuilder.java index 4c8df0bf67751d..47002355de907d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/ColumnStatisticBuilder.java @@ -56,9 +56,24 @@ public ColumnStatisticBuilder(ColumnStatistic columnStatistic) { this.updatedTime = columnStatistic.updatedTime; } - public ColumnStatisticBuilder setCount(double count) { + // ATTENTION: DON'T USE FOLLOWING TWO DURING STATS DERIVING EXCEPT FOR INITIALIZATION + public ColumnStatisticBuilder(double count) { this.count = count; - return this; + } + + public ColumnStatisticBuilder(ColumnStatistic columnStatistic, double count) { + this.count = count; + this.ndv = columnStatistic.ndv; + this.avgSizeByte = columnStatistic.avgSizeByte; + this.numNulls = columnStatistic.numNulls; + this.dataSize = columnStatistic.dataSize; + this.minValue = columnStatistic.minValue; + this.maxValue = columnStatistic.maxValue; + this.minExpr = columnStatistic.minExpr; + this.maxExpr = columnStatistic.maxExpr; + this.isUnknown = columnStatistic.isUnKnown; + this.original = columnStatistic.original; + this.updatedTime = columnStatistic.updatedTime; } public ColumnStatisticBuilder setNdv(double ndv) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java index 7ac4b95d484e04..753167fb442f41 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/OlapScanStatsDerive.java @@ -67,9 +67,6 @@ public StatsDeriveResult deriveStats() { Env.getCurrentEnv().getStatisticsCache().getColumnStatistics( table.getDatabase().getCatalog().getId(), table.getDatabase().getId(), table.getId(), -1, colName); - if (!statistic.isUnKnown) { - rowCount = statistic.count; - } 
columnStatisticMap.put(entry.getKey(), statistic); } return new StatsDeriveResult(rowCount, columnStatisticMap); diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatistic.java index eebe910d8b007b..7222dc8825831c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatistic.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatistic.java @@ -38,16 +38,12 @@ public class PartitionColumnStatistic { private static final Logger LOG = LogManager.getLogger(PartitionColumnStatistic.class); - public static PartitionColumnStatistic UNKNOWN = new PartitionColumnStatisticBuilder().setAvgSizeByte(1) - .setNdv(new Hll128()).setNumNulls(1).setCount(1).setMaxValue(Double.POSITIVE_INFINITY) + public static PartitionColumnStatistic UNKNOWN = new PartitionColumnStatisticBuilder(1).setAvgSizeByte(1) + .setNdv(new Hll128()).setNumNulls(1).setMaxValue(Double.POSITIVE_INFINITY) .setMinValue(Double.NEGATIVE_INFINITY) .setIsUnknown(true).setUpdatedTime("") .build(); - public static PartitionColumnStatistic ZERO = new PartitionColumnStatisticBuilder().setAvgSizeByte(0) - .setNdv(new Hll128()).setNumNulls(0).setCount(0).setMaxValue(Double.NaN).setMinValue(Double.NaN) - .build(); - public final double count; public final Hll128 ndv; public final double numNulls; @@ -109,9 +105,8 @@ public static PartitionColumnStatistic fromResultRow(ResultRow row) throws IOExc return PartitionColumnStatistic.UNKNOWN; } - PartitionColumnStatisticBuilder partitionStatisticBuilder = new PartitionColumnStatisticBuilder(); double count = Double.parseDouble(row.get(6)); - partitionStatisticBuilder.setCount(count); + PartitionColumnStatisticBuilder partitionStatisticBuilder = new PartitionColumnStatisticBuilder(count); String ndv = row.get(7); Base64.Decoder decoder = Base64.getDecoder(); DataInputStream dis = new DataInputStream(new ByteArrayInputStream(decoder.decode(ndv))); diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticBuilder.java index fe26396f21213a..b1dc7cdd0017d4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/PartitionColumnStatisticBuilder.java @@ -50,9 +50,9 @@ public PartitionColumnStatisticBuilder(PartitionColumnStatistic statistic) { this.updatedTime = statistic.updatedTime; } - public PartitionColumnStatisticBuilder setCount(double count) { + // ATTENTION: DON'T USE FOLLOWING TWO DURING STATS DERIVING EXCEPT FOR INITIALIZATION + public PartitionColumnStatisticBuilder(double count) { this.count = count; - return this; } public PartitionColumnStatisticBuilder setNdv(Hll128 ndv) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/Statistics.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/Statistics.java index 6883eb0b54208a..e18dc09792054e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/Statistics.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/Statistics.java @@ -110,7 +110,6 @@ public void enforceValid() { ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(columnStatistic); columnStatisticBuilder.setNdv(ndv); columnStatisticBuilder.setNumNulls(Math.min(columnStatistic.numNulls, rowCount - ndv)); - 
columnStatisticBuilder.setCount(rowCount); columnStatistic = columnStatisticBuilder.build(); expressionToColumnStats.put(entry.getKey(), columnStatistic); } @@ -228,14 +227,6 @@ public int getBENumber() { return 1; } - public static Statistics zero(Statistics statistics) { - Statistics zero = new Statistics(0, new HashMap<>()); - for (Map.Entry entry : statistics.expressionToColumnStats.entrySet()) { - zero.addColumnStats(entry.getKey(), ColumnStatistic.ZERO); - } - return zero; - } - public static double getValidSelectivity(double nullSel) { return nullSel < 0 ? 0 : (nullSel > 1 ? 1 : nullSel); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java index 0d9fa3674628c7..ba23ab84dc7a32 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatisticsRepository.java @@ -326,10 +326,9 @@ public static void alterColumnStatistics(AlterColumnStatsStmt alterColumnStatsSt if (rowCount == null) { throw new RuntimeException("Row count is null."); } - ColumnStatisticBuilder builder = new ColumnStatisticBuilder(); + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(Double.parseDouble(rowCount)); String colName = alterColumnStatsStmt.getColumnName(); Column column = objects.table.getColumn(colName); - builder.setCount(Double.parseDouble(rowCount)); if (ndv != null) { double dNdv = Double.parseDouble(ndv); builder.setNdv(dNdv); diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsDeriveResult.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsDeriveResult.java index 8c301f911be95b..977518d47ed549 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsDeriveResult.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/StatsDeriveResult.java @@ -20,8 +20,6 @@ import org.apache.doris.common.Id; import org.apache.doris.nereids.trees.expressions.Slot; -import com.google.common.base.Preconditions; - import java.util.HashMap; import java.util.List; import java.util.Map; @@ -125,25 +123,6 @@ public StatsDeriveResult withSelectivity(double selectivity) { return statsDeriveResult; } - public StatsDeriveResult updateByLimit(long limit) { - Preconditions.checkArgument(limit >= 0); - limit = Math.min(limit, (long) rowCount); - StatsDeriveResult statsDeriveResult = new StatsDeriveResult(limit, width, penalty); - for (Entry entry : slotIdToColumnStats.entrySet()) { - statsDeriveResult.addColumnStats(entry.getKey(), entry.getValue().updateByLimit(limit, rowCount)); - } - // When the table is first created, rowCount is empty. - // This leads to NPE if there is SetOperation outside the limit. - // Therefore, when rowCount is empty, slotIdToColumnStats is also imported, - // but the possible problem is that the first query statistics are not derived accurately. 
- if (statsDeriveResult.slotIdToColumnStats.isEmpty()) { - for (Entry entry : slotIdToColumnStats.entrySet()) { - statsDeriveResult.addColumnStats(entry.getKey(), entry.getValue()); - } - } - return statsDeriveResult; - } - public StatsDeriveResult copy() { return new StatsDeriveResult(this); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/TableStatsMeta.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/TableStatsMeta.java index de0f0eed18dae7..61a5a9b1f88457 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/TableStatsMeta.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/TableStatsMeta.java @@ -25,7 +25,6 @@ import org.apache.doris.common.io.Writable; import org.apache.doris.persist.gson.GsonPostProcessable; import org.apache.doris.persist.gson.GsonUtils; -import org.apache.doris.statistics.AnalysisInfo.AnalysisMethod; import org.apache.doris.statistics.AnalysisInfo.JobType; import org.apache.doris.statistics.util.StatisticsUtil; @@ -165,7 +164,9 @@ public Set> analyzeColumns() { public void update(AnalysisInfo analyzedJob, TableIf tableIf) { updatedTime = analyzedJob.tblUpdateTime; - userInjected = analyzedJob.userInject; + if (analyzedJob.userInject) { + userInjected = true; + } for (Pair colPair : analyzedJob.jobColumns) { ColStatsMeta colStatsMeta = colToColStatsMeta.get(colPair); if (colStatsMeta == null) { @@ -194,15 +195,15 @@ public void update(AnalysisInfo analyzedJob, TableIf tableIf) { clearStaleIndexRowCount((OlapTable) tableIf); } rowCount = analyzedJob.rowCount; - if (rowCount == 0 && AnalysisMethod.SAMPLE.equals(analyzedJob.analysisMethod)) { - return; - } if (analyzedJob.jobColumns.containsAll( tableIf.getColumnIndexPairs( tableIf.getSchemaAllIndexes(false).stream() .filter(c -> !StatisticsUtil.isUnsupportedType(c.getType())) .map(Column::getName).collect(Collectors.toSet())))) { partitionChanged.set(false); + } + // Set userInject back to false after manual analyze. 
+ if (JobType.MANUAL.equals(jobType) && !analyzedJob.userInject) { userInjected = false; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java index 0dca003069e3bb..405b1882e7455d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java @@ -43,7 +43,6 @@ import org.apache.doris.catalog.PrimitiveType; import org.apache.doris.catalog.ScalarType; import org.apache.doris.catalog.StructType; -import org.apache.doris.catalog.TableAttributes; import org.apache.doris.catalog.TableIf; import org.apache.doris.catalog.Type; import org.apache.doris.catalog.VariantType; @@ -84,7 +83,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.text.StringSubstitutor; -import org.apache.iceberg.DataFile; import org.apache.iceberg.FileScanTask; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.TableScan; @@ -162,7 +160,6 @@ public static QueryState execUpdate(String sql) throws Exception { StmtExecutor stmtExecutor = null; AutoCloseConnectContext r = StatisticsUtil.buildConnectContext(); try { - r.connectContext.getSessionVariable().disableNereidsPlannerOnce(); stmtExecutor = new StmtExecutor(r.connectContext, sql); r.connectContext.setExecutor(stmtExecutor); stmtExecutor.execute(); @@ -215,7 +212,6 @@ public static AutoCloseConnectContext buildConnectContext(boolean limitScan, boo sessionVariable.enableProfile = Config.enable_profile_when_analyze; sessionVariable.parallelExecInstanceNum = Config.statistics_sql_parallel_exec_instance_num; sessionVariable.parallelPipelineTaskNum = Config.statistics_sql_parallel_exec_instance_num; - sessionVariable.setEnableNereidsPlanner(true); sessionVariable.enableScanRunSerial = limitScan; sessionVariable.setQueryTimeoutS(StatisticsUtil.getAnalyzeTimeout()); sessionVariable.insertTimeoutS = StatisticsUtil.getAnalyzeTimeout(); @@ -227,8 +223,8 @@ public static AutoCloseConnectContext buildConnectContext(boolean limitScan, boo sessionVariable.enableMaterializedViewRewrite = false; connectContext.setEnv(Env.getCurrentEnv()); connectContext.setDatabase(FeConstants.INTERNAL_DB_NAME); - connectContext.setQualifiedUser(UserIdentity.ROOT.getQualifiedUser()); - connectContext.setCurrentUserIdentity(UserIdentity.ROOT); + connectContext.setQualifiedUser(UserIdentity.ADMIN.getQualifiedUser()); + connectContext.setCurrentUserIdentity(UserIdentity.ADMIN); connectContext.setStartTime(); if (Config.isCloudMode()) { AutoCloseConnectContext ctx = new AutoCloseConnectContext(connectContext); @@ -659,20 +655,25 @@ public static long getTotalSizeFromHMS(HMSExternalTable table) { */ public static Optional getIcebergColumnStats(String colName, org.apache.iceberg.Table table) { TableScan tableScan = table.newScan().includeColumnStats(); - ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(); - columnStatisticBuilder.setCount(0); - columnStatisticBuilder.setMaxValue(Double.POSITIVE_INFINITY); - columnStatisticBuilder.setMinValue(Double.NEGATIVE_INFINITY); - columnStatisticBuilder.setDataSize(0); - columnStatisticBuilder.setAvgSizeByte(0); - columnStatisticBuilder.setNumNulls(0); + double totalDataSize = 0; + double totalDataCount = 0; + double totalNumNull = 0; try (CloseableIterable fileScanTasks = tableScan.planFiles()) { for (FileScanTask task : 
fileScanTasks) { - processDataFile(task.file(), task.spec(), colName, columnStatisticBuilder); + int colId = getColId(task.spec(), colName); + totalDataSize += task.file().columnSizes().get(colId); + totalDataCount += task.file().recordCount(); + totalNumNull += task.file().nullValueCounts().get(colId); } } catch (IOException e) { LOG.warn("Error to close FileScanTask.", e); } + ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(totalDataCount); + columnStatisticBuilder.setMaxValue(Double.POSITIVE_INFINITY); + columnStatisticBuilder.setMinValue(Double.NEGATIVE_INFINITY); + columnStatisticBuilder.setDataSize(totalDataSize); + columnStatisticBuilder.setAvgSizeByte(0); + columnStatisticBuilder.setNumNulls(totalNumNull); if (columnStatisticBuilder.getCount() > 0) { columnStatisticBuilder.setAvgSizeByte(columnStatisticBuilder.getDataSize() / columnStatisticBuilder.getCount()); @@ -680,8 +681,7 @@ public static Optional getIcebergColumnStats(String colName, or return Optional.of(columnStatisticBuilder.build()); } - private static void processDataFile(DataFile dataFile, PartitionSpec partitionSpec, - String colName, ColumnStatisticBuilder columnStatisticBuilder) { + private static int getColId(PartitionSpec partitionSpec, String colName) { int colId = -1; for (Types.NestedField column : partitionSpec.schema().columns()) { if (column.name().equals(colName)) { @@ -692,12 +692,7 @@ private static void processDataFile(DataFile dataFile, PartitionSpec partitionSp if (colId == -1) { throw new RuntimeException(String.format("Column %s not exist.", colName)); } - // Update the data size, count and num of nulls in columnStatisticBuilder. - // TODO: Get min max value. - columnStatisticBuilder.setDataSize(columnStatisticBuilder.getDataSize() + dataFile.columnSizes().get(colId)); - columnStatisticBuilder.setCount(columnStatisticBuilder.getCount() + dataFile.recordCount()); - columnStatisticBuilder.setNumNulls(columnStatisticBuilder.getNumNulls() - + dataFile.nullValueCounts().get(colId)); + return colId; } public static boolean isUnsupportedType(Type type) { @@ -957,27 +952,26 @@ public static boolean isMvColumn(TableIf table, String columnName) { } public static boolean isEmptyTable(TableIf table, AnalysisInfo.AnalysisMethod method) { - int waitRowCountReportedTime = 75; + int waitRowCountReportedTime = 120; if (!(table instanceof OlapTable) || method.equals(AnalysisInfo.AnalysisMethod.FULL)) { return false; } OlapTable olapTable = (OlapTable) table; + long rowCount = 0; for (int i = 0; i < waitRowCountReportedTime; i++) { - if (olapTable.getRowCount() > 0) { - return false; - } - // If visible version is 2, table is probably not empty. So we wait row count to be reported. - // If visible version is not 2 and getRowCount return 0, we assume it is an empty table. - if (olapTable.getVisibleVersion() != TableAttributes.TABLE_INIT_VERSION + 1) { - return true; - } - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - LOG.info("Sleep interrupted.", e); + rowCount = olapTable.getRowCountForIndex(olapTable.getBaseIndexId(), true); + // rowCount == -1 means new table or first load row count not fully reported, need to wait. 
+ if (rowCount == -1) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOG.info("Sleep interrupted."); + } + continue; } + break; } - return true; + return rowCount == 0; } public static boolean needAnalyzeColumn(TableIf table, Pair column) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java index 876e6ca40b4ef2..93a8ecdec6a279 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java @@ -188,7 +188,7 @@ public Backend(long id, String host, int heartbeatPort) { } public String getCloudClusterStatus() { - return tagMap.getOrDefault(Tag.CLOUD_CLUSTER_STATUS, String.valueOf(Cloud.ClusterStatus.UNKNOWN)); + return tagMap.getOrDefault(Tag.CLOUD_CLUSTER_STATUS, String.valueOf(Cloud.ClusterStatus.NORMAL)); } public void setCloudClusterStatus(final String clusterStatus) { @@ -925,8 +925,28 @@ public TNetworkAddress getArrowFlightAddress() { return new TNetworkAddress(getHost(), getArrowFlightSqlPort()); } + // Only used for users, we hide and rename some internal tags. public String getTagMapString() { - return "{" + new PrintableMap<>(tagMap, ":", true, false).toString() + "}"; + Map displayTagMap = Maps.newHashMap(); + displayTagMap.putAll(tagMap); + + if (displayTagMap.containsKey("cloud_cluster_public_endpoint")) { + displayTagMap.put("public_endpoint", displayTagMap.remove("cloud_cluster_public_endpoint")); + } + if (displayTagMap.containsKey("cloud_cluster_private_endpoint")) { + displayTagMap.put("private_endpoint", displayTagMap.remove("cloud_cluster_private_endpoint")); + } + if (displayTagMap.containsKey("cloud_cluster_status")) { + displayTagMap.put("compute_group_status", displayTagMap.remove("cloud_cluster_status")); + } + if (displayTagMap.containsKey("cloud_cluster_id")) { + displayTagMap.put("compute_group_id", displayTagMap.remove("cloud_cluster_id")); + } + if (displayTagMap.containsKey("cloud_cluster_name")) { + displayTagMap.put("compute_group_name", displayTagMap.remove("cloud_cluster_name")); + } + + return "{" + new PrintableMap<>(displayTagMap, ":", true, false).toString() + "}"; } public Long getPublishTaskLastTimeAccumulated() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Frontend.java b/fe/fe-core/src/main/java/org/apache/doris/system/Frontend.java index 4f4bdbb7a43530..76fc67c5ac6c61 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Frontend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Frontend.java @@ -47,6 +47,9 @@ public class Frontend implements Writable { // used for getIpByHostname @SerializedName("editLogPort") private int editLogPort; + @SerializedName("cloudUniqueId") + private String cloudUniqueId; + private String version; private int queryPort; @@ -141,6 +144,14 @@ public List getDiskInfos() { return diskInfos; } + public void setCloudUniqueId(String cloudUniqueId) { + this.cloudUniqueId = cloudUniqueId; + } + + public String getCloudUniqueId() { + return cloudUniqueId; + } + /** * handle Frontend's heartbeat response. Because the replayed journal id is very likely to be * changed at each heartbeat response, so we simple return true if the heartbeat status is OK. 
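The isEmptyTable rewrite in the StatisticsUtil hunk above replaces the old visible-version heuristic with a report-then-decide loop: getRowCountForIndex returning -1 means the row count has not been reported yet, so the caller sleeps one second and retries (up to 120 polls) before treating a reported 0 as an empty table. A standalone sketch of that polling pattern follows; the RowCountSource interface and RowCountPoller class are hypothetical stand-ins for OlapTable, shown only to isolate the sentinel-and-retry logic.

// Sketch of the wait-for-report pattern in StatisticsUtil.isEmptyTable():
// -1 is a sentinel for "row count not reported yet", so keep polling; any real value ends the loop.
interface RowCountSource {
    long rowCountOrSentinel(); // assumed to return -1 until the first row count report arrives
}

final class RowCountPoller {
    static boolean looksEmpty(RowCountSource source, int maxPolls) throws InterruptedException {
        long rowCount = -1;
        for (int i = 0; i < maxPolls; i++) {
            rowCount = source.rowCountOrSentinel();
            if (rowCount != -1) {
                break;              // a real count (possibly 0) has been reported
            }
            Thread.sleep(1000L);    // still waiting for the first report
        }
        return rowCount == 0;       // still -1 after maxPolls is treated as not empty
    }

    private RowCountPoller() {
    }
}

In the patch itself the poll budget is 120 (waitRowCountReportedTime) with a one-second sleep, so a table whose row count is never reported can delay the decision by up to two minutes before being treated as non-empty.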
diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java index 5dd8dd9fca1ca0..3fc09b31f2d312 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java @@ -96,10 +96,6 @@ public void setMaster(int clusterId, String token, long epoch) { long flags = heartbeatFlags.getHeartbeatFlags(); tMasterInfo.setHeartbeatFlags(flags); if (Config.isCloudMode()) { - // Set cloud_instance_id and meta_service_endpoint even if there are empty - // Be can knowns that fe is working in cloud mode. - // Set the cloud instance ID for cloud deployment identification - tMasterInfo.setCloudInstanceId(Config.cloud_instance_id); // Set the endpoint for the metadata service in cloud mode tMasterInfo.setMetaServiceEndpoint(Config.meta_service_endpoint); } @@ -254,6 +250,10 @@ public HeartbeatResponse call() { copiedMasterInfo.setHeartbeatFlags(flags); copiedMasterInfo.setBackendId(backendId); copiedMasterInfo.setFrontendInfos(feInfos); + if (Config.isCloudMode()) { + String cloudUniqueId = backend.getTagMap().get(Tag.CLOUD_UNIQUE_ID); + copiedMasterInfo.setCloudUniqueId(cloudUniqueId); + } THeartbeatResult result; if (!FeConstants.runningUnitTest) { client = ClientPool.backendHeartbeatPool.borrowObject(beAddr); diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java index 4d07d219dc4882..103c659a990645 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java @@ -1089,40 +1089,45 @@ private static void tableOptionsForInternalCatalog(UserIdentity currentUserIdent continue; } OlapTable olapTable = (OlapTable) table; - TRow trow = new TRow(); - trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG - trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA - trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME - trow.addToColumnValue( - new TCell().setStringVal(olapTable.getKeysType().toMetadata())); // TABLE_MODEL - trow.addToColumnValue( - new TCell().setStringVal(olapTable.getKeyColAsString())); // key columTypes - - DistributionInfo distributionInfo = olapTable.getDefaultDistributionInfo(); - if (distributionInfo.getType() == DistributionInfoType.HASH) { - HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; - List distributionColumns = hashDistributionInfo.getDistributionColumns(); - StringBuilder distributeKey = new StringBuilder(); - for (Column c : distributionColumns) { - if (distributeKey.length() != 0) { - distributeKey.append(","); + olapTable.readLock(); + try { + TRow trow = new TRow(); + trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG + trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA + trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME + trow.addToColumnValue( + new TCell().setStringVal(olapTable.getKeysType().toMetadata())); // TABLE_MODEL + trow.addToColumnValue( + new TCell().setStringVal(olapTable.getKeyColAsString())); // key columTypes + + DistributionInfo distributionInfo = olapTable.getDefaultDistributionInfo(); + if (distributionInfo.getType() == 
DistributionInfoType.HASH) { + HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; + List distributionColumns = hashDistributionInfo.getDistributionColumns(); + StringBuilder distributeKey = new StringBuilder(); + for (Column c : distributionColumns) { + if (distributeKey.length() != 0) { + distributeKey.append(","); + } + distributeKey.append(c.getName()); } - distributeKey.append(c.getName()); - } - if (distributeKey.length() == 0) { - trow.addToColumnValue(new TCell().setStringVal("")); + if (distributeKey.length() == 0) { + trow.addToColumnValue(new TCell().setStringVal("")); + } else { + trow.addToColumnValue( + new TCell().setStringVal(distributeKey.toString())); + } + trow.addToColumnValue(new TCell().setStringVal("HASH")); // DISTRIBUTE_TYPE } else { - trow.addToColumnValue( - new TCell().setStringVal(distributeKey.toString())); + trow.addToColumnValue(new TCell().setStringVal("RANDOM")); // DISTRIBUTE_KEY + trow.addToColumnValue(new TCell().setStringVal("RANDOM")); // DISTRIBUTE_TYPE } - trow.addToColumnValue(new TCell().setStringVal("HASH")); // DISTRIBUTE_TYPE - } else { - trow.addToColumnValue(new TCell().setStringVal("RANDOM")); // DISTRIBUTE_KEY - trow.addToColumnValue(new TCell().setStringVal("RANDOM")); // DISTRIBUTE_TYPE + trow.addToColumnValue(new TCell().setIntVal(distributionInfo.getBucketNum())); // BUCKETS_NUM + trow.addToColumnValue(new TCell().setIntVal(olapTable.getPartitionNum())); // PARTITION_NUM + dataBatch.add(trow); + } finally { + olapTable.readUnlock(); } - trow.addToColumnValue(new TCell().setIntVal(distributionInfo.getBucketNum())); // BUCKETS_NUM - trow.addToColumnValue(new TCell().setIntVal(olapTable.getPartitionNum())); // PARTITION_NUM - dataBatch.add(trow); } } @@ -1206,29 +1211,34 @@ private static void tablePropertiesForInternalCatalog(UserIdentity currentUserId continue; } OlapTable olapTable = (OlapTable) table; - TableProperty property = olapTable.getTableProperty(); - if (property == null) { - // if there is no properties, then write empty properties and check next table. - TRow trow = new TRow(); - trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG - trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA - trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME - trow.addToColumnValue(new TCell().setStringVal("")); // PROPERTIES_NAME - trow.addToColumnValue(new TCell().setStringVal("")); // PROPERTIES_VALUE - dataBatch.add(trow); - continue; - } + olapTable.readLock(); + try { + TableProperty property = olapTable.getTableProperty(); + if (property == null) { + // if there is no properties, then write empty properties and check next table. 
+ TRow trow = new TRow(); + trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG + trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA + trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME + trow.addToColumnValue(new TCell().setStringVal("")); // PROPERTIES_NAME + trow.addToColumnValue(new TCell().setStringVal("")); // PROPERTIES_VALUE + dataBatch.add(trow); + continue; + } - Map propertiesMap = property.getProperties(); - propertiesMap.forEach((key, value) -> { - TRow trow = new TRow(); - trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG - trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA - trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME - trow.addToColumnValue(new TCell().setStringVal(key)); // PROPERTIES_NAME - trow.addToColumnValue(new TCell().setStringVal(value)); // PROPERTIES_VALUE - dataBatch.add(trow); - }); + Map propertiesMap = property.getProperties(); + propertiesMap.forEach((key, value) -> { + TRow trow = new TRow(); + trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG + trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA + trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME + trow.addToColumnValue(new TCell().setStringVal(key)); // PROPERTIES_NAME + trow.addToColumnValue(new TCell().setStringVal(value)); // PROPERTIES_VALUE + dataBatch.add(trow); + }); + } finally { + olapTable.readUnlock(); + } } // for table } @@ -1336,49 +1346,56 @@ private static void partitionsForInternalCatalog(UserIdentity currentUserIdentit } OlapTable olapTable = (OlapTable) table; - Collection allPartitions = olapTable.getAllPartitions(); + olapTable.readLock(); + try { + Collection allPartitions = olapTable.getAllPartitions(); - for (Partition partition : allPartitions) { - TRow trow = new TRow(); - trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG - trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA - trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME - trow.addToColumnValue(new TCell().setStringVal(partition.getName())); // PARTITION_NAME - trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_NAME (always null) - - trow.addToColumnValue(new TCell().setIntVal(0)); //PARTITION_ORDINAL_POSITION (not available) - trow.addToColumnValue(new TCell().setIntVal(0)); //SUBPARTITION_ORDINAL_POSITION (not available) - trow.addToColumnValue(new TCell().setStringVal( - olapTable.getPartitionInfo().getType().toString())); // PARTITION_METHOD - trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_METHOD(always null) - PartitionItem item = olapTable.getPartitionInfo().getItem(partition.getId()); - if ((olapTable.getPartitionInfo().getType() == PartitionType.UNPARTITIONED) || (item == null)) { - trow.addToColumnValue(new TCell().setStringVal("NULL")); // if unpartitioned, its null - trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_EXPRESSION (always null) - trow.addToColumnValue(new TCell().setStringVal("NULL")); // PARITION DESC, its null - } else { + for (Partition partition : allPartitions) { + TRow trow = new TRow(); + trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG + trow.addToColumnValue(new 
TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA + trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME + trow.addToColumnValue(new TCell().setStringVal(partition.getName())); // PARTITION_NAME + trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_NAME (always null) + + trow.addToColumnValue(new TCell().setIntVal(0)); //PARTITION_ORDINAL_POSITION (not available) + trow.addToColumnValue(new TCell().setIntVal(0)); //SUBPARTITION_ORDINAL_POSITION (not available) trow.addToColumnValue(new TCell().setStringVal( - olapTable.getPartitionInfo() - .getDisplayPartitionColumns().toString())); // PARTITION_EXPRESSION - trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_EXPRESSION (always null) + olapTable.getPartitionInfo().getType().toString())); // PARTITION_METHOD + trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_METHOD(always null) + PartitionItem item = olapTable.getPartitionInfo().getItem(partition.getId()); + if ((olapTable.getPartitionInfo().getType() == PartitionType.UNPARTITIONED) || (item == null)) { + trow.addToColumnValue(new TCell().setStringVal("NULL")); // if unpartitioned, its null + trow.addToColumnValue( + new TCell().setStringVal("NULL")); // SUBPARTITION_EXPRESSION (always null) + trow.addToColumnValue(new TCell().setStringVal("NULL")); // PARITION DESC, its null + } else { + trow.addToColumnValue(new TCell().setStringVal( + olapTable.getPartitionInfo() + .getDisplayPartitionColumns().toString())); // PARTITION_EXPRESSION + trow.addToColumnValue( + new TCell().setStringVal("NULL")); // SUBPARTITION_EXPRESSION (always null) + trow.addToColumnValue(new TCell().setStringVal( + item.getItemsSql())); // PARITION DESC + } + trow.addToColumnValue(new TCell().setLongVal(partition.getRowCount())); //TABLE_ROWS (PARTITION row) + trow.addToColumnValue(new TCell().setLongVal(partition.getAvgRowLength())); //AVG_ROW_LENGTH + trow.addToColumnValue(new TCell().setLongVal(partition.getDataLength())); //DATA_LENGTH + trow.addToColumnValue(new TCell().setIntVal(0)); //MAX_DATA_LENGTH (not available) + trow.addToColumnValue(new TCell().setIntVal(0)); //INDEX_LENGTH (not available) + trow.addToColumnValue(new TCell().setIntVal(0)); //DATA_FREE (not available) + trow.addToColumnValue(new TCell().setStringVal("NULL")); //CREATE_TIME (not available) trow.addToColumnValue(new TCell().setStringVal( - item.getItemsSql())); // PARITION DESC + TimeUtils.longToTimeString(partition.getVisibleVersionTime()))); //UPDATE_TIME + trow.addToColumnValue(new TCell().setStringVal("NULL")); // CHECK_TIME (not available) + trow.addToColumnValue(new TCell().setIntVal(0)); //CHECKSUM (not available) + trow.addToColumnValue(new TCell().setStringVal("")); // PARTITION_COMMENT (not available) + trow.addToColumnValue(new TCell().setStringVal("")); // NODEGROUP (not available) + trow.addToColumnValue(new TCell().setStringVal("")); // TABLESPACE_NAME (not available) + dataBatch.add(trow); } - trow.addToColumnValue(new TCell().setLongVal(partition.getRowCount())); //TABLE_ROWS (PARTITION row) - trow.addToColumnValue(new TCell().setLongVal(partition.getAvgRowLength())); //AVG_ROW_LENGTH - trow.addToColumnValue(new TCell().setLongVal(partition.getDataLength())); //DATA_LENGTH - trow.addToColumnValue(new TCell().setIntVal(0)); //MAX_DATA_LENGTH (not available) - trow.addToColumnValue(new TCell().setIntVal(0)); //INDEX_LENGTH (not available) - trow.addToColumnValue(new TCell().setIntVal(0)); //DATA_FREE (not available) - 
trow.addToColumnValue(new TCell().setStringVal("NULL")); //CREATE_TIME (not available) - trow.addToColumnValue(new TCell().setStringVal( - TimeUtils.longToTimeString(partition.getVisibleVersionTime()))); //UPDATE_TIME - trow.addToColumnValue(new TCell().setStringVal("NULL")); // CHECK_TIME (not available) - trow.addToColumnValue(new TCell().setIntVal(0)); //CHECKSUM (not available) - trow.addToColumnValue(new TCell().setStringVal("")); // PARTITION_COMMENT (not available) - trow.addToColumnValue(new TCell().setStringVal("")); // NODEGROUP (not available) - trow.addToColumnValue(new TCell().setStringVal("")); // TABLESPACE_NAME (not available) - dataBatch.add(trow); + } finally { + olapTable.readUnlock(); } } // for table } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractExternalTransactionManager.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractExternalTransactionManager.java new file mode 100644 index 00000000000000..da80b8f77bd6f0 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/AbstractExternalTransactionManager.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.transaction; + +import org.apache.doris.catalog.Env; +import org.apache.doris.common.UserException; +import org.apache.doris.datasource.operations.ExternalMetadataOps; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public abstract class AbstractExternalTransactionManager implements TransactionManager { + private static final Logger LOG = LogManager.getLogger(AbstractExternalTransactionManager.class); + private final Map transactions = new ConcurrentHashMap<>(); + protected final ExternalMetadataOps ops; + + public AbstractExternalTransactionManager(ExternalMetadataOps ops) { + this.ops = ops; + } + + abstract T createTransaction(); + + @Override + public long begin() { + long id = Env.getCurrentEnv().getNextId(); + T transaction = createTransaction(); + transactions.put(id, transaction); + Env.getCurrentEnv().getGlobalExternalTransactionInfoMgr().putTxnById(id, transaction); + return id; + } + + @Override + public void commit(long id) throws UserException { + getTransactionWithException(id).commit(); + transactions.remove(id); + Env.getCurrentEnv().getGlobalExternalTransactionInfoMgr().removeTxnById(id); + } + + @Override + public void rollback(long id) { + try { + getTransactionWithException(id).rollback(); + } catch (TransactionNotFoundException e) { + LOG.warn(e.getMessage(), e); + } finally { + transactions.remove(id); + Env.getCurrentEnv().getGlobalExternalTransactionInfoMgr().removeTxnById(id); + } + } + + @Override + public Transaction getTransaction(long id) throws UserException { + return getTransactionWithException(id); + } + + private Transaction getTransactionWithException(long id) throws TransactionNotFoundException { + Transaction txn = transactions.get(id); + if (txn == null) { + throw new TransactionNotFoundException("Can't find transaction for " + id); + } + return txn; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java index 73c930fb4c6367..fa11664cc64a46 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/DatabaseTransactionMgr.java @@ -128,14 +128,6 @@ private enum PublishResult { // transactionId -> running TransactionState private final Map idToRunningTransactionState = Maps.newHashMap(); - /** - * the multi table ids that are in transaction, used to check whether a table is in transaction - * multi table transaction state - * txnId -> tableId list - */ - private final ConcurrentHashMap> multiTableRunningTransactionTableIdMaps = - new ConcurrentHashMap<>(); - // transactionId -> final status TransactionState private final Map idToFinalStatusTransactionState = Maps.newHashMap(); private final Map subTxnIdToTxnId = new ConcurrentHashMap<>(); @@ -478,8 +470,13 @@ public void preCommitTransaction2PC(List
    tableList, long transactionId, checkCommitStatus(tableList, transactionState, tabletCommitInfos, txnCommitAttachment, errorReplicaIds, tableToPartition, totalInvolvedBackends); - unprotectedPreCommitTransaction2PC(transactionState, errorReplicaIds, tableToPartition, - totalInvolvedBackends, db); + writeLock(); + try { + unprotectedPreCommitTransaction2PC(transactionState, errorReplicaIds, tableToPartition, + totalInvolvedBackends, db); + } finally { + writeUnlock(); + } LOG.info("transaction:[{}] successfully pre-committed", transactionState); } @@ -850,7 +847,7 @@ public void commitTransaction(List<Table>
    tableList, long transactionId, List<Table> tableList, + protected void commitTransaction(long transactionId, List<Table>
    tableList, List subTransactionStates) throws UserException { // check status // the caller method already own tables' write lock @@ -1538,8 +1535,18 @@ private PartitionCommitInfo generatePartitionCommitInfo(OlapTable table, long pa protected void unprotectedCommitTransaction(TransactionState transactionState, Set errorReplicaIds, Map> tableToPartition, Set totalInvolvedBackends, Database db) { - checkBeforeUnprotectedCommitTransaction(transactionState, errorReplicaIds); - + // transaction state is modified during check if the transaction could committed + if (transactionState.getTransactionStatus() != TransactionStatus.PREPARE) { + return; + } + // update transaction state version + long commitTime = System.currentTimeMillis(); + transactionState.setCommitTime(commitTime); + if (MetricRepo.isInit) { + MetricRepo.HISTO_TXN_EXEC_LATENCY.update(commitTime - transactionState.getPrepareTime()); + } + transactionState.setTransactionStatus(TransactionStatus.COMMITTED); + transactionState.setErrorReplicas(errorReplicaIds); for (long tableId : tableToPartition.keySet()) { OlapTable table = (OlapTable) db.getTableNullable(tableId); TableCommitInfo tableCommitInfo = new TableCommitInfo(tableId); @@ -1555,7 +1562,9 @@ protected void unprotectedCommitTransaction(TransactionState transactionState, S transactionState.setInvolvedBackends(totalInvolvedBackends); } - private void checkBeforeUnprotectedCommitTransaction(TransactionState transactionState, Set errorReplicaIds) { + protected void unprotectedCommitTransaction(TransactionState transactionState, Set errorReplicaIds, + Map> subTxnToPartition, Set totalInvolvedBackends, + List subTransactionStates, Database db) { // transaction state is modified during check if the transaction could committed if (transactionState.getTransactionStatus() != TransactionStatus.PREPARE) { return; @@ -1569,15 +1578,6 @@ private void checkBeforeUnprotectedCommitTransaction(TransactionState transactio transactionState.setTransactionStatus(TransactionStatus.COMMITTED); transactionState.setErrorReplicas(errorReplicaIds); - // persist transactionState - unprotectUpsertTransactionState(transactionState, false); - } - - protected void unprotectedCommitTransaction(TransactionState transactionState, Set errorReplicaIds, - Map> subTxnToPartition, Set totalInvolvedBackends, - List subTransactionStates, Database db) { - checkBeforeUnprotectedCommitTransaction(transactionState, errorReplicaIds); - Map> tableToSubTransactionState = new HashMap<>(); for (SubTransactionState subTransactionState : subTransactionStates) { long tableId = subTransactionState.getTable().getId(); @@ -2199,14 +2199,14 @@ private void updatePartitionNextVersion(TransactionState transactionState, Datab } } if (LOG.isDebugEnabled()) { - LOG.debug("txn_id={}, partition to next version={}", transactionState.getTransactionId(), + LOG.debug("txn_id={}, partition to version={}", transactionState.getTransactionId(), partitionToVersionMap); } for (Entry entry : partitionToVersionMap.entrySet()) { Partition partition = entry.getKey(); long version = entry.getValue(); partition.setNextVersion(version + 1); - LOG.debug("set partition={}, next_version={}", partition.getId(), partition.getNextVersion()); + LOG.debug("set partition={}, next_version={}", partition.getId(), version + 1); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalExternalTransactionInfoMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalExternalTransactionInfoMgr.java new file mode 100644 index 
00000000000000..e516c648dff9b0 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalExternalTransactionInfoMgr.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.transaction; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class GlobalExternalTransactionInfoMgr { + public Map idToTxn = new ConcurrentHashMap<>(); + + public Transaction getTxnById(long txnId) { + if (idToTxn.containsKey(txnId)) { + return idToTxn.get(txnId); + } + throw new RuntimeException("Can't find txn for " + txnId); + } + + public void putTxnById(long txnId, Transaction txn) { + if (idToTxn.containsKey(txnId)) { + throw new RuntimeException("Duplicate txnId for " + txnId); + } + idToTxn.put(txnId, txn); + } + + public void removeTxnById(long txnId) { + idToTxn.remove(txnId); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java index f97fbf5f191c26..ff1115bf0a6990 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java @@ -200,7 +200,7 @@ public void preCommitTransaction2PC(Database db, List
diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java index f97fbf5f191c26..ff1115bf0a6990 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgr.java @@ -200,7 +200,7 @@ public void preCommitTransaction2PC(Database db, List<Table> tableList, long tra } } - public void preCommitTransaction2PC(long dbId, List<Table>
tableList, long transactionId, + private void preCommitTransaction2PC(long dbId, List<Table>
tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos, TxnCommitAttachment txnCommitAttachment) throws UserException { if (Config.disable_load_job) { @@ -214,6 +214,7 @@ public void preCommitTransaction2PC(long dbId, List<Table>
tableList, long trans dbTransactionMgr.preCommitTransaction2PC(tableList, transactionId, tabletCommitInfos, txnCommitAttachment); } + @Deprecated public void commitTransaction(long dbId, List<Table>
tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos) throws UserException { @@ -634,6 +635,7 @@ public void abortTxnWhenCoordinateBeRestart(long coordinateBeId, String coordina TransactionState transactionState = dbTransactionMgr.getTransactionState(txnInfo.second); long coordStartTime = transactionState.getCoordinator().startTime; if (coordStartTime > 0 && coordStartTime < beStartTime) { + // does not hold table write lock dbTransactionMgr.abortTransaction(txnInfo.second, "coordinate BE restart", null); } } catch (UserException e) { @@ -652,6 +654,7 @@ public void abortTxnWhenCoordinateBeDown(long coordinateBeId, String coordinateH = getPrepareTransactionIdByCoordinateBe(coordinateBeId, coordinateHost, limit); for (Pair<Long, Long> txnInfo : transactionIdByCoordinateBe) { try { + // does not hold table write lock DatabaseTransactionMgr dbTransactionMgr = getDatabaseTransactionMgr(txnInfo.first); dbTransactionMgr.abortTransaction(txnInfo.second, "coordinate BE is down", null); } catch (UserException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgrIface.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgrIface.java index 9a9b3e16b1ef9e..c8d436e68d8356 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgrIface.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/GlobalTransactionMgrIface.java @@ -77,6 +77,7 @@ public void preCommitTransaction2PC(Database db, List<Table>
tableList, long tra TxnCommitAttachment txnCommitAttachment) throws UserException; + @Deprecated public void commitTransaction(long dbId, List<Table> tableList, long transactionId, List<TabletCommitInfo> tabletCommitInfos) throws UserException;
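The next two hunks rewrite HiveTransactionManager and IcebergTransactionManager to extend a shared AbstractExternalTransactionManager and keep only a createTransaction() factory; the begin/commit/rollback bookkeeping they used to duplicate moves into that base class, which is introduced elsewhere in this patch. The sketch below is only a guess at the shape of that template, using stand-in types rather than the actual Doris classes:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Hedged sketch of the template-method refactor: concrete managers only supply createTransaction().
class ExternalTxnTemplateSketch {
    interface Txn {                        // stand-in for the Doris Transaction interface
        void commit() throws Exception;
        void rollback();
    }

    abstract static class AbstractManager<T extends Txn> {
        private final Map<Long, T> transactions = new ConcurrentHashMap<>();
        private final AtomicLong idGen = new AtomicLong();   // Doris uses Env.getCurrentEnv().getNextId()

        abstract T createTransaction();

        long begin() {
            long id = idGen.incrementAndGet();
            transactions.put(id, createTransaction());
            return id;
        }

        void commit(long id) throws Exception {
            getTransaction(id).commit();
            transactions.remove(id);
        }

        void rollback(long id) {
            try {
                getTransaction(id).rollback();
            } finally {
                transactions.remove(id);      // always drop the entry, even if rollback throws
            }
        }

        T getTransaction(long id) {
            T txn = transactions.get(id);
            if (txn == null) {
                throw new RuntimeException("Can't find transaction for " + id);
            }
            return txn;
        }
    }

    static class DemoTxn implements Txn {
        public void commit() { }
        public void rollback() { }
    }

    static class DemoManager extends AbstractManager<DemoTxn> {
        DemoTxn createTransaction() { return new DemoTxn(); }
    }

    public static void main(String[] args) throws Exception {
        DemoManager mgr = new DemoManager();
        long id = mgr.begin();
        mgr.commit(id); // entry is removed from the registry after a successful commit
    }
}

With the shared bookkeeping factored out, the concrete managers reduce to a constructor plus createTransaction(), which is what the diffs below show.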
diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java index c48210ad452ad9..65f0c2bd5e3cb3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/HiveTransactionManager.java @@ -17,65 +17,26 @@ package org.apache.doris.transaction; -import org.apache.doris.catalog.Env; -import org.apache.doris.common.UserException; import org.apache.doris.datasource.hive.HMSTransaction; import org.apache.doris.datasource.hive.HiveMetadataOps; import org.apache.doris.fs.FileSystemProvider; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; -public class HiveTransactionManager implements TransactionManager { - - private final Map<Long, HMSTransaction> transactions = new ConcurrentHashMap<>(); - private final HiveMetadataOps ops; +public class HiveTransactionManager extends AbstractExternalTransactionManager { private final FileSystemProvider fileSystemProvider; - private final Executor fileSystemExecutor; public HiveTransactionManager(HiveMetadataOps ops, FileSystemProvider fileSystemProvider, Executor fileSystemExecutor) { - this.ops = ops; + super(ops); this.fileSystemProvider = fileSystemProvider; this.fileSystemExecutor = fileSystemExecutor; } @Override - public long begin() { - long id = Env.getCurrentEnv().getNextId(); - HMSTransaction hiveTransaction = new HMSTransaction(ops, fileSystemProvider, fileSystemExecutor); - transactions.put(id, hiveTransaction); - return id; - } - - @Override - public void commit(long id) throws UserException { - getTransactionWithException(id).commit(); - transactions.remove(id); - } - - @Override - public void rollback(long id) { - try { - getTransactionWithException(id).rollback(); - } finally { - transactions.remove(id); - } - } - - @Override - public HMSTransaction getTransaction(long id) { - return getTransactionWithException(id); - } - - public HMSTransaction getTransactionWithException(long id) { - HMSTransaction hiveTransaction = transactions.get(id); - if (hiveTransaction == null) { - throw new RuntimeException("Can't find transaction for " + id); - } - return hiveTransaction; + HMSTransaction createTransaction() { + return new HMSTransaction((HiveMetadataOps) ops, fileSystemProvider, fileSystemExecutor); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java index f373c13368558f..8f4d25a19b3ac5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/IcebergTransactionManager.java @@ -18,56 +18,17 @@ package org.apache.doris.transaction; -import org.apache.doris.catalog.Env; -import org.apache.doris.common.UserException; import org.apache.doris.datasource.iceberg.IcebergMetadataOps; import org.apache.doris.datasource.iceberg.IcebergTransaction; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class IcebergTransactionManager implements TransactionManager { - - private final Map<Long, IcebergTransaction> transactions = new ConcurrentHashMap<>(); - private final IcebergMetadataOps ops; +public class IcebergTransactionManager extends AbstractExternalTransactionManager { public
IcebergTransactionManager(IcebergMetadataOps ops) { - this.ops = ops; + super(ops); } @Override - public long begin() { - long id = Env.getCurrentEnv().getNextId(); - IcebergTransaction icebergTransaction = new IcebergTransaction(ops); - transactions.put(id, icebergTransaction); - return id; - } - - @Override - public void commit(long id) throws UserException { - getTransactionWithException(id).commit(); - transactions.remove(id); - } - - @Override - public void rollback(long id) { - try { - getTransactionWithException(id).rollback(); - } finally { - transactions.remove(id); - } - } - - @Override - public IcebergTransaction getTransaction(long id) { - return getTransactionWithException(id); - } - - public IcebergTransaction getTransactionWithException(long id) { - IcebergTransaction icebergTransaction = transactions.get(id); - if (icebergTransaction == null) { - throw new RuntimeException("Can't find transaction for " + id); - } - return icebergTransaction; + IcebergTransaction createTransaction() { + return new IcebergTransaction((IcebergMetadataOps) ops); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionManager.java b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionManager.java index ca9cbb917ec277..fbff324ae914b1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/transaction/TransactionManager.java @@ -27,5 +27,5 @@ public interface TransactionManager { void rollback(long id); - Transaction getTransaction(long id); + Transaction getTransaction(long id) throws UserException; } diff --git a/fe/fe-core/src/main/jflex/sql_scanner.flex b/fe/fe-core/src/main/jflex/sql_scanner.flex index be16c0a6a00440..1863fc877cbcd9 100644 --- a/fe/fe-core/src/main/jflex/sql_scanner.flex +++ b/fe/fe-core/src/main/jflex/sql_scanner.flex @@ -158,6 +158,7 @@ import org.apache.doris.qe.SqlModeHelper; keywordMap.put("committed", new Integer(SqlParserSymbols.KW_COMMITTED)); keywordMap.put("compact", new Integer(SqlParserSymbols.KW_COMPACT)); keywordMap.put("complete", new Integer(SqlParserSymbols.KW_COMPLETE)); + keywordMap.put("compute", new Integer(SqlParserSymbols.KW_COMPUTE)); keywordMap.put("config", new Integer(SqlParserSymbols.KW_CONFIG)); keywordMap.put("connection", new Integer(SqlParserSymbols.KW_CONNECTION)); keywordMap.put("connection_id", new Integer(SqlParserSymbols.KW_CONNECTION_ID)); @@ -456,6 +457,7 @@ import org.apache.doris.qe.SqlModeHelper; keywordMap.put("stop", new Integer(SqlParserSymbols.KW_STOP)); keywordMap.put("storage", new Integer(SqlParserSymbols.KW_STORAGE)); keywordMap.put("vault", new Integer(SqlParserSymbols.KW_VAULT)); + keywordMap.put("vaults", new Integer(SqlParserSymbols.KW_VAULTS)); keywordMap.put("stream", new Integer(SqlParserSymbols.KW_STREAM)); keywordMap.put("streaming", new Integer(SqlParserSymbols.KW_STREAMING)); keywordMap.put("string", new Integer(SqlParserSymbols.KW_STRING)); diff --git a/fe/fe-core/src/main/resources/META-INF/services/org.apache.doris.mysql.privilege.AccessControllerFactory b/fe/fe-core/src/main/resources/META-INF/services/org.apache.doris.mysql.privilege.AccessControllerFactory new file mode 100644 index 00000000000000..e2100cb8b23508 --- /dev/null +++ b/fe/fe-core/src/main/resources/META-INF/services/org.apache.doris.mysql.privilege.AccessControllerFactory @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +org.apache.doris.mysql.privilege.RangerDorisAccessControllerFactory +org.apache.doris.catalog.authorizer.ranger.hive.RangerHiveAccessControllerFactory \ No newline at end of file diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java index 0d8759aa08e237..0d6e09c5051d89 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/InsertArrayStmtTest.java @@ -23,7 +23,11 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.ExceptionChecker; import org.apache.doris.common.util.SqlParserUtils; +import org.apache.doris.nereids.StatementContext; +import org.apache.doris.nereids.glue.LogicalPlanAdapter; +import org.apache.doris.nereids.trees.plans.commands.UnsupportedCommand; import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.OriginStatement; import org.apache.doris.qe.QueryState; import org.apache.doris.qe.QueryState.MysqlStateType; import org.apache.doris.qe.StmtExecutor; @@ -131,19 +135,18 @@ public void testTransactionalInsert() throws Exception { + "\"disable_auto_compaction\" = \"false\"\n" + ");")); - SqlParser parser = new SqlParser(new SqlScanner( - new StringReader("begin"), connectContext.getSessionVariable().getSqlMode() - )); - TransactionBeginStmt beginStmt = (TransactionBeginStmt) SqlParserUtils.getFirstStmt(parser); - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, beginStmt); + OriginStatement originStatement = new OriginStatement("begin", 0); + StatementBase begin = new LogicalPlanAdapter(new UnsupportedCommand(), + new StatementContext(connectContext, originStatement)); + begin.setOrigStmt(originStatement); + StmtExecutor stmtExecutor = new StmtExecutor(connectContext, begin); stmtExecutor.execute(); - parser = new SqlParser(new SqlScanner( - new StringReader("insert into test.txn_insert_tbl values(2, 3.3, \"xyz\", [1], [1, 0]);"), - connectContext.getSessionVariable().getSqlMode() - )); - InsertStmt insertStmt = (InsertStmt) SqlParserUtils.getFirstStmt(parser); - stmtExecutor = new StmtExecutor(connectContext, insertStmt); + originStatement = new OriginStatement("insert into test.txn_insert_tbl values(2, 3.3, \"xyz\", [1], [1, 0]);", 0); + StatementBase insert = new LogicalPlanAdapter(new UnsupportedCommand(), + new StatementContext(connectContext, originStatement)); + insert.setOrigStmt(originStatement); + stmtExecutor = new StmtExecutor(connectContext, insert); stmtExecutor.execute(); QueryState state = connectContext.getState(); Assert.assertEquals(state.getErrorMessage(), MysqlStateType.OK, state.getStateType()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java index 7bce2526df00c3..d30658ff6d48b2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/RangePartitionPruneTest.java @@ -124,87 +124,87 @@ protected void runBeforeAll() throws Exception { private void initTestCases() { // 1. Single partition column // no filters - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1", "partitions=8/8"); + addCase("select * from test.t1", "partitions=8/8"); // equal to - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt=20211122", "partitions=1/8"); + addCase("select * from test.t1 where dt=20211122", "partitions=1/8"); // less than - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt<20211122", "partitions=2/8"); + addCase("select * from test.t1 where dt<20211122", "partitions=2/8"); // less than or equal - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt<=20211122", "partitions=3/8"); + addCase("select * from test.t1 where dt<=20211122", "partitions=3/8"); // greater than - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt>20211122", "partitions=6/8"); + addCase("select * from test.t1 where dt>20211122", "partitions=5/8"); // greater than or equal - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt>=20211122", "partitions=6/8"); + addCase("select * from test.t1 where dt>=20211122", "partitions=6/8"); // in - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt in (20211124, 20211126, 20211122)", "partitions=3/8"); + addCase("select * from test.t1 where dt in (20211124, 20211126, 20211122)", "partitions=3/8"); // is null - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt is null", "partitions=1/8"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.`single_not_null` where dt is null", "partitions=0/7"); + addCase("select * from test.t1 where dt is null", "partitions=1/8"); + addCase("select * from test.`single_not_null` where dt is null", "VEMPTYSET"); // not equal to - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt!=20211122", "partitions=8/8"); + addCase("select * from test.t1 where dt!=20211122", "partitions=7/8"); // 2. 
Multiple partition columns // no filters - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2", "partitions=9/9"); + addCase("select * from test.t2", "partitions=9/9"); // equal to - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1=7", "partitions=2/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2=7", "partitions=9/9"); + addCase("select * from test.t2 where k1=7", "partitions=2/9"); + addCase("select * from test.t2 where k2=7", "partitions=7/9"); // less than - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1<7", "partitions=2/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2<7", "partitions=9/9"); + addCase("select * from test.t2 where k1<7", "partitions=2/9"); + addCase("select * from test.t2 where k2<7", "partitions=9/9"); // less than or equal - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1<=7", "partitions=3/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2>7", "partitions=9/9"); + addCase("select * from test.t2 where k1<=7", "partitions=3/9"); + addCase("select * from test.t2 where k2>7", "partitions=8/9"); // greater than or equal - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1>=7", "partitions=8/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2>=7", "partitions=9/9"); + addCase("select * from test.t2 where k1>=7", "partitions=8/9"); + addCase("select * from test.t2 where k2>=7", "partitions=8/9"); // in - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 in (7,9,16)", "partitions=3/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2 in (7,9,16)", "partitions=9/9"); + addCase("select * from test.t2 where k1 in (7,9,16)", "partitions=3/9"); + addCase("select * from test.t2 where k2 in (7,9,16)", "partitions=8/9"); // is null - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 is null", "partitions=1/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2 is null", "partitions=9/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.multi_not_null where k1 is null", "partitions=0/2"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.multi_not_null where k2 is null", "partitions=2/2"); + addCase("select * from test.t2 where k1 is null", "partitions=1/9"); + addCase("select * from test.t2 where k2 is null", "partitions=7/9"); + addCase("select * from test.multi_not_null where k1 is null", "VEMPTYSET"); + addCase("select * from test.multi_not_null where k2 is null", "EMPTYSET"); // not equal to - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1!=23", "partitions=9/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k2!=23", "partitions=9/9"); + addCase("select * from test.t2 where k1!=23", "partitions=7/9"); + addCase("select * from test.t2 where k2!=23", "partitions=9/9"); // 3. 
Conjunctive predicates // equal to and other predicates - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1=23 and k2=5", "partitions=1/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1=23 and k2>5", "partitions=1/9"); + addCase("select * from test.t2 where k1=23 and k2=5", "partitions=1/9"); + addCase("select * from test.t2 where k1=23 and k2>5", "partitions=1/9"); // in and other equal predicates - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 in (3, 10, 13) and k2>10", "partitions=2/9"); + addCase("select * from test.t2 where k1 in (3, 10, 13) and k2>10", "partitions=2/9"); // is null and other predicates - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 > 10 and k1 is null", "partitions=0/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 is null and k1 > 10", "partitions=0/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.multi_not_null where k1 > 10 and k1 is null", "partitions=0/2"); + addCase("select * from test.t2 where k1 > 10 and k1 is null", "VEMPTYSET"); + addCase("select * from test.t2 where k1 is null and k1 > 10", "VEMPTYSET"); + addCase("select * from test.multi_not_null where k1 > 10 and k1 is null", "VEMPTYSET"); // others predicates combination - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 > 10 and k2 < 4", "partitions=6/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 >10 and k1 < 10 and (k1=11 or k1=12)", "partitions=0/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 > 20 and k1 < 7 and k1 = 10", "partitions=0/9"); + addCase("select * from test.t2 where k1 > 10 and k2 < 4", "partitions=5/9"); + addCase("select * from test.t2 where k1 >10 and k1 < 10 and (k1=11 or k1=12)", "VEMPTYSET"); + addCase("select * from test.t2 where k1 > 20 and k1 < 7 and k1 = 10", "VEMPTYSET"); // 4. 
Disjunctive predicates - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1=10 or k1=23", "partitions=3/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where (k1=10 or k1=23) and (k2=4 or k2=5)", "partitions=1/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where (k1=10 or k1=23) and (k2=4 or k2=11)", "partitions=2/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where (k1=10 or k1=23) and (k2=3 or k2=4 or k2=11)", "partitions=3/9"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where dt=20211123 or dt=20211124", "partitions=2/8"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where ((dt=20211123 and k1=1) or (dt=20211125 and k1=3))", "partitions=2/8"); + addCase("select * from test.t2 where k1=10 or k1=23", "partitions=3/9"); + addCase("select * from test.t2 where (k1=10 or k1=23) and (k2=4 or k2=5)", "partitions=1/9"); + addCase("select * from test.t2 where (k1=10 or k1=23) and (k2=4 or k2=11)", "partitions=2/9"); + addCase("select * from test.t2 where (k1=10 or k1=23) and (k2=3 or k2=4 or k2=11)", "partitions=3/9"); + addCase("select * from test.t1 where dt=20211123 or dt=20211124", "partitions=2/8"); + addCase("select * from test.t1 where ((dt=20211123 and k1=1) or (dt=20211125 and k1=3))", "partitions=2/8"); // TODO: predicates are "PREDICATES: ((`dt` = 20211123 AND `k1` = 1) OR (`dt` = 20211125 AND `k1` = 3)), `k2` > ", // maybe something goes wrong with ExtractCommonFactorsRule. - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where ((dt=20211123 and k1=1) or (dt=20211125 and k1=3)) and k2>0", + addCase("select * from test.t1 where ((dt=20211123 and k1=1) or (dt=20211125 and k1=3)) and k2>0", "partitions=2/8"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t2 where k1 > 10 or k2 < 1", "partitions=9/9"); + addCase("select * from test.t2 where k1 > 10 or k2 < 1", "partitions=9/9"); // add some cases for CompoundPredicate - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where (dt >= 20211121 and dt <= 20211122) or (dt >= 20211123 and dt <= 20211125)", + addCase("select * from test.t1 where (dt >= 20211121 and dt <= 20211122) or (dt >= 20211123 and dt <= 20211125)", "partitions=5/8"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where (dt between 20211121 and 20211122) or (dt between 20211123 and 20211125)", + addCase("select * from test.t1 where (dt between 20211121 and 20211122) or (dt between 20211123 and 20211125)", "partitions=5/8"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.t1 where (dt between 20211121 and 20211122) or dt is null or (dt between 20211123 and 20211125)", + addCase("select * from test.t1 where (dt between 20211121 and 20211122) or dt is null or (dt between 20211123 and 20211125)", "partitions=6/8"); - addCase("select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test_to_date_trunc where event_day= \"2023-08-07 11:00:00\" ", + addCase("select * from test.test_to_date_trunc where event_day= \"2023-08-07 11:00:00\" ", "partitions=1/2"); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java deleted file mode 100755 index b4299f0f62f60c..00000000000000 --- 
a/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java +++ /dev/null @@ -1,1087 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Database; -import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.MaterializedIndex; -import org.apache.doris.catalog.MaterializedIndex.IndexExtState; -import org.apache.doris.catalog.OlapTable; -import org.apache.doris.catalog.Partition; -import org.apache.doris.catalog.Tablet; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.FeConstants; -import org.apache.doris.common.util.Util; -import org.apache.doris.planner.OlapScanNode; -import org.apache.doris.planner.OriginalPlanner; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.VariableMgr; -import org.apache.doris.utframe.DorisAssert; -import org.apache.doris.utframe.UtFrameUtils; - -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import mockit.Mock; -import mockit.MockUp; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.jupiter.api.Disabled; -import org.junit.rules.ExpectedException; - -import java.util.List; -import java.util.Set; -import java.util.UUID; - -public class SelectStmtTest { - private static final String runningDir = "fe/mocked/DemoTest/" + UUID.randomUUID() + "/"; - private static DorisAssert dorisAssert; - - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - - @AfterClass - public static void tearDown() throws Exception { - UtFrameUtils.cleanDorisFeDir(runningDir); - } - - @BeforeClass - public static void setUp() throws Exception { - Config.enable_http_server_v2 = false; - UtFrameUtils.createDorisCluster(runningDir); - String createTblStmtStr = "create table db1.tbl1(k1 varchar(32)," - + " k2 varchar(32), k3 varchar(32), k4 int, k5 largeint) " - + "AGGREGATE KEY(k1, k2,k3,k4,k5) distributed by hash(k1) buckets 3" - + " properties('replication_num' = '1');"; - String createBaseAllStmtStr = "create table db1.baseall(k1 int, k2 varchar(32)) distributed by hash(k1) " - + "buckets 3 properties('replication_num' = '1');"; - String createPratitionTableStr = "CREATE TABLE db1.partition_table (\n" - + "datekey int(11) NULL COMMENT \"datekey\",\n" - + "poi_id bigint(20) NULL COMMENT \"poi_id\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(datekey, poi_id)\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE(datekey)\n" - + "(PARTITION p20200727 VALUES [(\"20200726\"), (\"20200727\")),\n" - + "PARTITION p20200728 VALUES [(\"20200727\"), (\"20200728\")))\n" - + "DISTRIBUTED BY HASH(poi_id) BUCKETS 2\n" 
- + "PROPERTIES (\n" - + "\"storage_type\" = \"COLUMN\",\n" - + "\"replication_num\" = \"1\"\n" - + ");"; - String createDatePartitionTableStr = "CREATE TABLE db1.date_partition_table (\n" - + " `dt` date NOT NULL COMMENT \"\",\n" - + " `poi_id` bigint(20) NULL COMMENT \"poi_id\",\n" - + " `uv1` bitmap BITMAP_UNION NOT NULL COMMENT \"\",\n" - + " `uv2` bitmap BITMAP_UNION NOT NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "PARTITION BY RANGE(`dt`)\n" - + "( PARTITION `p201701` VALUES LESS THAN (\"2020-09-08\"),\n" - + " PARTITION `p201702` VALUES LESS THAN (\"2020-09-09\"),\n" - + " PARTITION `p201703` VALUES LESS THAN (\"2020-09-10\"))\n" - + "DISTRIBUTED BY HASH(`poi_id`) BUCKETS 20\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"DEFAULT\"\n" - + ");"; - String tbl1 = "CREATE TABLE db1.table1 (\n" - + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" - + " `citycode` smallint(6) NULL COMMENT \"\",\n" - + " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" - + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"; - String tbl2 = "CREATE TABLE db1.table2 (\n" - + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" - + " `citycode` smallint(6) NULL COMMENT \"\",\n" - + " `username` varchar(32) NOT NULL DEFAULT \"\" COMMENT \"\",\n" - + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "UNIQUE KEY(`siteid`, `citycode`, `username`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`siteid`) BUCKETS 10\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"; - String tbl3 = "CREATE TABLE db1.table3 (\n" - + " `siteid` int(11) NULL DEFAULT \"10\" COMMENT \"\",\n" - + " `citycode` smallint(6) NULL COMMENT \"\",\n" - + " `username` varchar(32) NULL DEFAULT \"\" COMMENT \"\",\n" - + " `pv` bigint(20) NULL DEFAULT \"0\" COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`siteid`, `citycode`, `username`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY RANDOM BUCKETS 10\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"; - dorisAssert = new DorisAssert(); - dorisAssert.withDatabase("db1").useDatabase("db1"); - dorisAssert.withTable(createTblStmtStr) - .withTable(createBaseAllStmtStr) - .withTable(createPratitionTableStr) - .withTable(createDatePartitionTableStr) - .withTable(tbl1) - .withTable(tbl2) - .withTable(tbl3); - } - - @Test - public void testGroupingSets() throws Exception { - ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String selectStmtStr = "select k1,k2,MAX(k4) from db1.tbl1 GROUP BY GROUPING sets ((k1,k2),(k1),(k2),());"; - UtFrameUtils.parseAndAnalyzeStmt(selectStmtStr, ctx); - String selectStmtStr2 = "select k1,k4,MAX(k4) from db1.tbl1 GROUP BY GROUPING sets ((k1,k4),(k1),(k4),());"; - expectedEx.expect(AnalysisException.class); - expectedEx.expectMessage("column: `k4` cannot both in select list and aggregate functions when using GROUPING" - + " SETS/CUBE/ROLLUP, please use union instead."); - UtFrameUtils.parseAndAnalyzeStmt(selectStmtStr2, ctx); - String selectStmtStr3 = "select 
k1,k4,MAX(k4+k4) from db1.tbl1 GROUP BY GROUPING sets ((k1,k4),(k1),(k4),());"; - UtFrameUtils.parseAndAnalyzeStmt(selectStmtStr3, ctx); - String selectStmtStr4 = "select k1,k4+k4,MAX(k4+k4) from db1.tbl1 GROUP BY GROUPING sets ((k1,k4),(k1),(k4),()" - + ");"; - UtFrameUtils.parseAndAnalyzeStmt(selectStmtStr4, ctx); - } - - @Disabled - public void testSubqueryInCase() throws Exception { - ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql1 = "SELECT CASE\n" - + " WHEN (\n" - + " SELECT COUNT(*) / 2\n" - + " FROM db1.tbl1\n" - + " ) > k4 THEN (\n" - + " SELECT AVG(k4)\n" - + " FROM db1.tbl1\n" - + " )\n" - + " ELSE (\n" - + " SELECT SUM(k4)\n" - + " FROM db1.tbl1\n" - + " )\n" - + " END AS kk4\n" - + "FROM db1.tbl1;"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql1, ctx); - stmt.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt.toSql().contains("`$a$1`.`$c$1` > `k4` THEN `$a$2`.`$c$2` ELSE `$a$3`.`$c$3`")); - - String sql2 = "select case when k1 in (select k1 from db1.tbl1) then \"true\" else k1 end a from db1.tbl1"; - try { - SelectStmt stmt2 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql2, ctx); - stmt2.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.fail("syntax not supported."); - } catch (AnalysisException e) { - // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.fail("must be AnalysisException."); - } - try { - String sql3 = "select case k1 when exists (select 1) then \"empty\" else \"p_test\" end a from db1.tbl1"; - SelectStmt stmt3 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql3, ctx); - stmt3.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.fail("syntax not supported."); - } catch (AnalysisException e) { - // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.fail("must be AnalysisException."); - } - String sql4 = "select case when k1 < (select max(k1) from db1.tbl1) and " - + "k1 > (select min(k1) from db1.tbl1) then \"empty\" else \"p_test\" end a from db1.tbl1"; - SelectStmt stmt4 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql4, ctx); - stmt4.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt4.toSql().contains("`k1` < `$a$1`.`$c$1` AND `k1` > `$a$2`.`$c$2`")); - - String sql5 = "select case when k1 < (select max(k1) from db1.tbl1) is null " - + "then \"empty\" else \"p_test\" end a from db1.tbl1"; - SelectStmt stmt5 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql5, ctx); - stmt5.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt5.toSql().contains(" `k1` < `$a$1`.`$c$1` IS NULL ")); - } - - @Test - public void testDeduplicateOrs() throws Exception { - ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2,\n" - + " db1.tbl1 t3,\n" - + " db1.tbl1 t4,\n" - + " db1.tbl1 t5,\n" - + " db1.tbl1 t6\n" - + "where\n" - + " t2.k1 = t1.k1\n" - + " and t1.k2 = t6.k2\n" - + " and t6.k4 = 2001\n" - + " and(\n" - + " (\n" - + " t1.k2 = t4.k2\n" - + " and t3.k3 = t1.k3\n" - + " and t3.k1 = 'D'\n" - + " and t4.k3 = '2 yr Degree'\n" - + " and t1.k4 between 100.00\n" - + " and 150.00\n" - + " and t4.k4 = 3\n" - + " )\n" - + " or (\n" - + " t1.k2 = t4.k2\n" - + " and t3.k3 = t1.k3\n" - + " and t3.k1 = 'S'\n" - + " and t4.k3 = 'Secondary'\n" - + " and t1.k4 between 50.00\n" - + " and 100.00\n" - + " and t4.k4 = 1\n" - + " )\n" - + " or (\n" - + " t1.k2 = 
t4.k2\n" - + " and t3.k3 = t1.k3\n" - + " and t3.k1 = 'W'\n" - + " and t4.k3 = 'Advanced Degree'\n" - + " and t1.k4 between 150.00\n" - + " and 200.00\n" - + " and t4.k4 = 1\n" - + " )\n" - + " )\n" - + " and(\n" - + " (\n" - + " t1.k1 = t5.k1\n" - + " and t5.k2 = 'United States'\n" - + " and t5.k3 in ('CO', 'IL', 'MN')\n" - + " and t1.k4 between 100\n" - + " and 200\n" - + " )\n" - + " or (\n" - + " t1.k1 = t5.k1\n" - + " and t5.k2 = 'United States'\n" - + " and t5.k3 in ('OH', 'MT', 'NM')\n" - + " and t1.k4 between 150\n" - + " and 300\n" - + " )\n" - + " or (\n" - + " t1.k1 = t5.k1\n" - + " and t5.k2 = 'United States'\n" - + " and t5.k3 in ('TX', 'MO', 'MI')\n" - + " and t1.k4 between 50 and 250\n" - + " )\n" - + " );"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - stmt.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - String commonExpr1 = "`t1`.`k2` = `t4`.`k2`"; - String commonExpr2 = "`t3`.`k3` = `t1`.`k3`"; - String commonExpr3 = "`t1`.`k1` = `t5`.`k1`"; - String commonExpr4 = "t5`.`k2` = 'United States'"; - String betweenExpanded1 = "(CAST(CAST(`t1`.`k4` AS decimalv3(12,2)) AS int) >= 100)) AND (CAST(CAST(`t1`.`k4` AS decimalv3(12,2)) AS int) <= 150))"; - String betweenExpanded2 = "(CAST(CAST(`t1`.`k4` AS decimalv3(12,2)) AS int) >= 50)) AND (CAST(CAST(`t1`.`k4` AS decimalv3(12,2)) AS int) <= 100))"; - String betweenExpanded3 = "(`t1`.`k4` >= 50)) AND (`t1`.`k4` <= 250)"; - - String rewrittenSql = stmt.toSql(); - Assert.assertTrue(rewrittenSql.contains(commonExpr1)); - Assert.assertEquals(rewrittenSql.indexOf(commonExpr1), rewrittenSql.lastIndexOf(commonExpr1)); - Assert.assertTrue(rewrittenSql.contains(commonExpr2)); - Assert.assertEquals(rewrittenSql.indexOf(commonExpr2), rewrittenSql.lastIndexOf(commonExpr2)); - Assert.assertTrue(rewrittenSql.contains(commonExpr3)); - Assert.assertEquals(rewrittenSql.indexOf(commonExpr3), rewrittenSql.lastIndexOf(commonExpr3)); - Assert.assertTrue(rewrittenSql.contains(commonExpr4)); - Assert.assertEquals(rewrittenSql.indexOf(commonExpr4), rewrittenSql.lastIndexOf(commonExpr4)); - Assert.assertTrue(rewrittenSql.contains(betweenExpanded1)); - Assert.assertTrue(rewrittenSql.contains(betweenExpanded2)); - Assert.assertTrue(rewrittenSql.contains(betweenExpanded3)); - - String sql2 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + "(\n" - + " t1.k1 = t2.k3\n" - + " and t2.k2 = 'United States'\n" - + " and t2.k3 in ('CO', 'IL', 'MN')\n" - + " and t1.k4 between 100\n" - + " and 200\n" - + ")\n" - + "or (\n" - + " t1.k1 = t2.k1\n" - + " and t2.k2 = 'United States1'\n" - + " and t2.k3 in ('OH', 'MT', 'NM')\n" - + " and t1.k4 between 150\n" - + " and 300\n" - + ")\n" - + "or (\n" - + " t1.k1 = t2.k1\n" - + " and t2.k2 = 'United States'\n" - + " and t2.k3 in ('TX', 'MO', 'MI')\n" - + " and t1.k4 between 50 and 250\n" - + ")"; - SelectStmt stmt2 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql2, ctx); - stmt2.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - String fragment3 = - "(((((`t1`.`k4` >= 50) AND (`t1`.`k4` <= 300)) AND `t2`.`k2` IN ('United States', 'United States1')) " - + "AND `t2`.`k3` IN ('CO', 'IL', 'MN', 'OH', 'MT', 'NM', 'TX', 'MO', 'MI')) " - + "AND (((((((`t1`.`k1` = `t2`.`k3`) AND (`t2`.`k2` = 'United States')) " - + "AND `t2`.`k3` IN ('CO', 'IL', 'MN')) AND (`t1`.`k4` >= 100)) AND (`t1`.`k4` <= 200)) " - + "OR " - + "(((((`t1`.`k1` = `t2`.`k1`) AND (`t2`.`k2` = 'United States1')) " - + "AND `t2`.`k3` IN ('OH', 
'MT', 'NM')) AND (`t1`.`k4` >= 150)) AND (`t1`.`k4` <= 300))) " - + "OR " - + "(((((`t1`.`k1` = `t2`.`k1`) AND (`t2`.`k2` = 'United States')) " - + "AND `t2`.`k3` IN ('TX', 'MO', 'MI')) " - + "AND (`t1`.`k4` >= 50)) AND (`t1`.`k4` <= 250))))"; - Assert.assertTrue(stmt2.toSql().contains(fragment3)); - - String sql3 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + " t1.k1 = t2.k3 or t1.k1 = t2.k3 or t1.k1 = t2.k3"; - SelectStmt stmt3 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql3, ctx); - stmt3.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertFalse( - stmt3.toSql().contains("(`t1`.`k1` = `t2`.`k3`) OR (`t1`.`k1` = `t2`.`k3`) OR" + " (`t1`.`k1` = `t2`.`k3`)")); - - String sql4 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + " t1.k1 = t2.k2 or t1.k1 = t2.k3 or t1.k1 = t2.k3"; - SelectStmt stmt4 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql4, ctx); - stmt4.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt4.toSql().contains("(`t1`.`k1` = `t2`.`k2`) OR (`t1`.`k1` = `t2`.`k3`)")); - - String sql5 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + " t2.k1 is not null or t1.k1 is not null or t1.k1 is not null"; - SelectStmt stmt5 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql5, ctx); - stmt5.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt5.toSql().contains("`t2`.`k1` IS NOT NULL OR `t1`.`k1` IS NOT NULL")); - Assert.assertEquals(2, stmt5.toSql().split(" OR ").length); - - String sql6 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + " t2.k1 is not null or t1.k1 is not null and t1.k1 is not null"; - SelectStmt stmt6 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql6, ctx); - stmt6.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt6.toSql().contains("`t2`.`k1` IS NOT NULL OR `t1`.`k1` IS NOT NULL")); - Assert.assertEquals(2, stmt6.toSql().split(" OR ").length); - - String sql7 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + " t2.k1 is not null or t1.k1 is not null and t1.k2 is not null"; - SelectStmt stmt7 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql7, ctx); - stmt7.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt7.toSql() - .contains("`t2`.`k1` IS NOT NULL OR (`t1`.`k1` IS NOT NULL AND `t1`.`k2` IS NOT NULL)")); - - String sql8 = "select\n" - + " avg(t1.k4)\n" - + "from\n" - + " db1.tbl1 t1,\n" - + " db1.tbl1 t2\n" - + "where\n" - + " t2.k1 is not null and t1.k1 is not null and t1.k1 is not null"; - SelectStmt stmt8 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql8, ctx); - stmt8.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue(stmt8.toSql() - .contains("(`t2`.`k1` IS NOT NULL AND `t1`.`k1` IS NOT NULL) AND `t1`.`k1` IS NOT NULL")); - - String sql9 = "select * from db1.tbl1 where (k1='shutdown' and k4<1) or (k1='switchOff' and k4>=1)"; - SelectStmt stmt9 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql9, ctx); - stmt9.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter()); - Assert.assertTrue( - stmt9.toSql().contains("((`k1` = 'shutdown') AND (`k4` < 1)) OR ((`k1` = 'switchOff') AND (`k4` >= 1))")); - } - - @Test - public void 
testForbiddenCorrelatedSubqueryInHavingClause() throws Exception { - String sql = "SELECT k1 FROM baseall GROUP BY k1 HAVING EXISTS(SELECT k4 FROM tbl1 GROUP BY k4 HAVING SUM" - + "(baseall.k1) = k4);"; - try { - dorisAssert.query(sql).explainQuery(); - Assert.fail("The correlated subquery in having clause should be forbidden."); - } catch (AnalysisException e) { - System.out.println(e.getMessage()); - } - } - - @Test - public void testGroupByConstantExpression() throws Exception { - String sql = "SELECT k1 - 4*60*60 FROM baseall GROUP BY k1 - 4*60*60"; - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testMultrGroupByInCorrelationSubquery() throws Exception { - String sql = "SELECT * from baseall where k1 > (select min(k1) from tbl1 where baseall.k1 = tbl1.k4 and baseall.k2 = tbl1.k2)"; - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testOuterJoinNullUnionView() throws Exception { - String sql = "WITH test_view(k) AS(SELECT NULL AS k UNION ALL SELECT NULL AS k )\n" - + "SELECT v1.k FROM test_view AS v1 LEFT OUTER JOIN test_view AS v2 ON v1.k=v2.k"; - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testDataGripSupport() throws Exception { - String sql = "select schema();"; - dorisAssert.query(sql).explainQuery(); - sql = "select\n" - - + "collation_name,\n" - - + "character_set_name,\n" - - + "is_default collate utf8mb4_0900_bin = 'Yes' as is_default\n" - + "from information_schema.collations"; - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testRandFunction() throws Exception { - String sql = "select rand(db1.tbl1.k1) from db1.tbl1;"; - try { - dorisAssert.query(sql).explainQuery(); - Assert.fail("The param of rand function must be literal"); - } catch (AnalysisException e) { - System.out.println(e.getMessage()); - } - sql = "select rand(1234) from db1.tbl1;"; - dorisAssert.query(sql).explainQuery(); - sql = "select rand() from db1.tbl1;"; - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testImplicitConvertSupport() throws Exception { - String sql1 = "select /*+ SET_VAR(enable_nereids_planner=false) */ count(*) from db1.partition_table where datekey='20200730'"; - Assert.assertTrue(dorisAssert - .query(sql1) - .explainQuery() - .contains("`datekey` = 20200730")); - String sql2 = "select /*+ SET_VAR(enable_nereids_planner=false) */ count(*) from db1.partition_table where '20200730'=datekey"; - Assert.assertTrue(dorisAssert - .query(sql2) - .explainQuery() - .contains("`datekey` = 20200730")); - String sql3 = "select /*+ SET_VAR(enable_nereids_planner=false) */ count() from db1.date_partition_table where dt=20200908"; - Assert.assertTrue(dorisAssert - .query(sql3) - .explainQuery() - .contains(Config.enable_date_conversion ? "`dt` = '2020-09-08'" : "`dt` = '2020-09-08 00:00:00'")); - String sql4 = "select /*+ SET_VAR(enable_nereids_planner=false) */ count() from db1.date_partition_table where dt='2020-09-08'"; - Assert.assertTrue(dorisAssert - .query(sql4) - .explainQuery() - .contains(Config.enable_date_conversion ? 
"`dt` = '2020-09-08'" : "`dt` = '2020-09-08 00:00:00'")); - } - - @Test - public void testDeleteSign() throws Exception { - String sql1 = "SELECT /*+ SET_VAR(enable_nereids_planner=true, ENABLE_FALLBACK_TO_ORIGINAL_PLANNER=false, DISABLE_NEREIDS_RULES=PRUNE_EMPTY_PARTITION) */ * FROM db1.table1 LEFT ANTI JOIN db1.table2 ON db1.table1.siteid = db1.table2.siteid;"; - String explain = dorisAssert.query(sql1).explainQuery(); - Assert.assertTrue(explain - .contains("__DORIS_DELETE_SIGN__ = 0")); - Assert.assertFalse(explain.contains("other predicates:")); - String sql2 = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * FROM db1.table1 JOIN db1.table2 ON db1.table1.siteid = db1.table2.siteid;"; - explain = dorisAssert.query(sql2).explainQuery(); - Assert.assertTrue(explain - .contains("`db1`.`table1`.`__DORIS_DELETE_SIGN__` = 0")); - Assert.assertTrue(explain - .contains("`db1`.`table2`.`__DORIS_DELETE_SIGN__` = 0")); - Assert.assertFalse(explain.contains("other predicates:")); - String sql3 = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * FROM db1.table1"; - Assert.assertTrue(dorisAssert.query(sql3).explainQuery() - .contains("`db1`.`table1`.`__DORIS_DELETE_SIGN__` = 0")); - String sql4 = " SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * FROM db1.table1 table2"; - Assert.assertTrue(dorisAssert.query(sql4).explainQuery() - .contains("`table2`.`__DORIS_DELETE_SIGN__` = 0")); - new MockUp() { - @Mock - public boolean showHiddenColumns() { - return true; - } - }; - String sql5 = "SELECT * FROM db1.table1 LEFT ANTI JOIN db1.table2 ON db1.table1.siteid = db1.table2.siteid;"; - Assert.assertFalse(dorisAssert.query(sql5).explainQuery().contains("`table1`.`__DORIS_DELETE_SIGN__` = 0")); - String sql6 = "SELECT * FROM db1.table1 JOIN db1.table2 ON db1.table1.siteid = db1.table2.siteid;"; - Assert.assertFalse(dorisAssert.query(sql6).explainQuery().contains("`table1`.`__DORIS_DELETE_SIGN__` = 0")); - String sql7 = "SELECT * FROM db1.table1"; - Assert.assertFalse(dorisAssert.query(sql7).explainQuery().contains("`table1`.`__DORIS_DELETE_SIGN__` = 0")); - String sql8 = " SELECT * FROM db1.table1 table2"; - Assert.assertFalse(dorisAssert.query(sql8).explainQuery().contains("`table2`.`__DORIS_DELETE_SIGN__` = 0")); - } - - @Test - public void testSelectHints() throws Exception { - ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - - // hint with integer literal parameter - String sql = "select /*+ common_hint(1) */ 1"; - UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - - // hint with float literal parameter - sql = "select /*+ common_hint(1.1) */ 1"; - UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - - // hint with string literal parameter - sql = "select /*+ common_hint(\"string\") */ 1"; - UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - - // hint with key value parameter - sql = "select /*+ common_hint(k = \"v\") */ 1"; - UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - - // hint with multi-parameters - sql = "select /*+ common_hint(1, 1.1, \"string\") */ 1"; - UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - - // multi-hints - sql = "select /*+ common_hint(1) another_hint(2) */ 1"; - UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - } - - @Test - public void testSelectHintSetVar() throws Exception { - String sql = "SELECT sleep(3);"; - OriginalPlanner planner = (OriginalPlanner) dorisAssert.query(sql).internalExecuteOneAndGetPlan(); - Assert.assertEquals(VariableMgr.getDefaultSessionVariable().getQueryTimeoutS(), - planner.getPlannerContext().getQueryOptions().query_timeout); - - sql = "SELECT /*+ 
SET_VAR(query_timeout = 1) */ sleep(3);"; - planner = (OriginalPlanner) dorisAssert.query(sql).internalExecuteOneAndGetPlan(); - Assert.assertEquals(1, planner.getPlannerContext().getQueryOptions().query_timeout); - - sql = "select * from db1.partition_table where datekey=20200726"; - planner = (OriginalPlanner) dorisAssert.query(sql).internalExecuteOneAndGetPlan(); - Assert.assertEquals(VariableMgr.getDefaultSessionVariable().getMaxExecMemByte(), - planner.getPlannerContext().getQueryOptions().mem_limit); - - sql = "select /*+ SET_VAR(exec_mem_limit = 8589934592) */ poi_id, count(*) from db1.partition_table " - + "where datekey=20200726 group by 1"; - planner = (OriginalPlanner) dorisAssert.query(sql).internalExecuteOneAndGetPlan(); - Assert.assertEquals(8589934592L, planner.getPlannerContext().getQueryOptions().mem_limit); - - int queryTimeOut = dorisAssert.getSessionVariable().getQueryTimeoutS(); - long execMemLimit = dorisAssert.getSessionVariable().getMaxExecMemByte(); - sql = "select /*+ SET_VAR(exec_mem_limit = 8589934592, query_timeout = 1) */ 1 + 2;"; - planner = (OriginalPlanner) dorisAssert.query(sql).internalExecuteOneAndGetPlan(); - // session variable have been changed - Assert.assertEquals(1, planner.getPlannerContext().getQueryOptions().query_timeout); - Assert.assertEquals(8589934592L, planner.getPlannerContext().getQueryOptions().mem_limit); - // session variable change have been reverted - Assert.assertEquals(queryTimeOut, dorisAssert.getSessionVariable().getQueryTimeoutS()); - Assert.assertEquals(execMemLimit, dorisAssert.getSessionVariable().getMaxExecMemByte()); - } - - @Test - public void testWithWithoutDatabase() throws Exception { - String sql = "with tmp as (select count(*) from db1.table1) select * from tmp;"; - dorisAssert.withoutUseDatabase(); - dorisAssert.query(sql).explainQuery(); - - sql = "with tmp as (select * from db1.table1) " - + "select a.siteid, b.citycode, a.siteid from (select siteid, citycode from tmp) a " - + "left join (select siteid, citycode from tmp) b on a.siteid = b.siteid;"; - dorisAssert.withoutUseDatabase(); - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testWithInNestedQueryStmt() throws Exception { - String sql = "select 1 from (with w as (select 1 from db1.table1) select 1 from w) as tt"; - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testGetTableRefs() throws Exception { - ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - String sql = "SELECT * FROM db1.table1 JOIN db1.table2 ON db1.table1.siteid = db1.table2.siteid;"; - dorisAssert.query(sql).explainQuery(); - QueryStmt stmt = (QueryStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - List tblRefs = Lists.newArrayList(); - Set parentViewNameSet = Sets.newHashSet(); - stmt.getTableRefs(new Analyzer(ctx.getEnv(), ctx), tblRefs, parentViewNameSet); - - Assert.assertEquals(2, tblRefs.size()); - Assert.assertEquals("table1", tblRefs.get(0).getName().getTbl()); - Assert.assertEquals("table2", tblRefs.get(1).getName().getTbl()); - } - - @Test - public void testOutfile() throws Exception { - ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - Config.enable_outfile_to_local = true; - String sql - = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"required,byte_array,col0\");"; - dorisAssert.query(sql).explainQuery(); - // if shema not set, gen schema - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET;"; - try { - SelectStmt stmt = (SelectStmt) 
UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); - Assert.assertEquals(1, stmt.getOutFileClause().getParquetSchemas().size()); - Assert.assertEquals("k1", stmt.getOutFileClause().getParquetSchemas().get(0).schema_column_name); - } catch (Exception e) { - Assert.fail(e.getMessage()); - } - - // schema can not be empty - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"\");"; - try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Parquet schema property should not be empty")); - } - - // schema must contains 3 fields - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"int32,siteid;\");"; - try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("must only contains repetition type/column type/column name")); - } - - // unknown repetition type - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"repeat, int32,siteid;\");"; - try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("unknown repetition type")); - } - - // only support required type - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"repeated,int32,siteid;\");"; - try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("currently only support required type")); - } - - // unknown data type - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\" FORMAT AS PARQUET PROPERTIES (\"schema\"=\"required,int128,siteid;\");"; - try { - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("data type is not supported")); - } - - // contains parquet properties - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"file:///root/doris/\"" - + " FORMAT AS PARQUET" - + " PROPERTIES (\"schema\"=\"required,byte_array,siteid;\"," - + " 'parquet.compression'='snappy');"; - dorisAssert.query(sql).explainQuery(); - // support parquet for broker - sql = "SELECT k1 FROM db1.tbl1 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" FORMAT AS PARQUET " - + "PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " - + "\"broker.hadoop.security.authentication\" = \"kerberos\", " - + "\"broker.kerberos_principal\" = \"test\", " - + "\"broker.kerberos_keytab_content\" = \"test\" , " - + "\"schema\"=\"required,byte_array,siteid;\");"; - dorisAssert.query(sql).explainQuery(); - - // do not support large int type - try { - sql = "SELECT k5 FROM db1.tbl1 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" FORMAT AS PARQUET " - + "PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " - + "\"broker.hadoop.security.authentication\" = \"kerberos\", " - + "\"broker.kerberos_principal\" = \"test\", " - + "\"broker.kerberos_keytab_content\" = \"test\" ," - + " \"schema\"=\"required,int32,siteid;\");"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } 
catch (Exception e) { - e.printStackTrace(); - Assert.assertTrue(e.getMessage().contains("should use byte_array")); - } - - // do not support large int type, contains function - try { - sql = "SELECT sum(k5) FROM db1.tbl1 group by k5 INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" " - + "FORMAT AS PARQUET PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " - + "\"broker.hadoop.security.authentication\" = \"kerberos\", " - + "\"broker.kerberos_principal\" = \"test\", " - + "\"broker.kerberos_keytab_content\" = \"test\" , " - + "\"schema\"=\"required,int32,siteid;\");"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("should use byte_array")); - } - - // support cast - try { - sql = "SELECT cast(sum(k5) as bigint) FROM db1.tbl1 group by k5" - + " INTO OUTFILE \"hdfs://test/test_sql_prc_2019_02_19/\" " - + "FORMAT AS PARQUET PROPERTIES ( \"broker.name\" = \"hdfs_broker\", " - + "\"broker.hadoop.security.authentication\" = \"kerberos\", " - + "\"broker.kerberos_principal\" = \"test\", " - + "\"broker.kerberos_keytab_content\" = \"test\" , " - + "\"schema\"=\"required,int64,siteid;\");"; - SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx); // CHECKSTYLE IGNORE THIS LINE - } catch (Exception e) { - Assert.fail(e.getMessage()); - } - } - - @Test - public void testSystemViewCaseInsensitive() throws Exception { - String sql1 = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " - + "'ech_dw' ORDER BY ROUTINES.ROUTINE_SCHEMA\n"; - // The system view names in information_schema are case-insensitive, - dorisAssert.query(sql1).explainQuery(); - - String sql2 = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ ROUTINE_SCHEMA, ROUTINE_NAME FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = " - + "'ech_dw' ORDER BY routines.ROUTINE_SCHEMA\n"; - try { - // Should not refer to one of system views using different cases within the same statement. - // sql2 is wrong because 'ROUTINES' and 'routines' are used. 
-            dorisAssert.query(sql2).explainQuery();
-            Assert.fail("Refer to one of system views using different cases within the same statement is wrong.");
-        } catch (AnalysisException e) {
-            System.out.println(e.getMessage());
-        }
-    }
-
-    @Test
-    public void testWithUnionToSql() throws Exception {
-        ConnectContext ctx = UtFrameUtils.createDefaultCtx();
-        String sql1 =
-                "select \n"
-                + " t.k1 \n"
-                + "from (\n"
-                + " with \n"
-                + " v1 as (select t1.k1 from db1.tbl1 t1),\n"
-                + " v2 as (select t2.k1 from db1.tbl1 t2)\n"
-                + " select v1.k1 as k1 from v1\n"
-                + " union\n"
-                + " select v2.k1 as k1 from v2\n"
-                + ") t";
-        SelectStmt stmt1 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql1, ctx);
-        stmt1.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter());
-        Assert.assertEquals("SELECT `t`.`k1` AS `k1` FROM (WITH v1 AS (SELECT `t1`.`k1` "
-                + "FROM `db1`.`tbl1` t1),v2 AS (SELECT `t2`.`k1` FROM `db1`.`tbl1` t2) "
-                + "SELECT `v1`.`k1` AS `k1` FROM `v1` UNION SELECT `v2`.`k1` AS `k1` FROM `v2`) t", stmt1.toSql());
-
-        String sql2 =
-                "with\n"
-                + " v1 as (select t1.k1 from db1.tbl1 t1),\n"
-                + " v2 as (select t2.k1 from db1.tbl1 t2)\n"
-                + "select\n"
-                + " t.k1\n"
-                + "from (\n"
-                + " select v1.k1 as k1 from v1\n"
-                + " union\n"
-                + " select v2.k1 as k1 from v2\n"
-                + ") t";
-        SelectStmt stmt2 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql2, ctx);
-        stmt2.rewriteExprs(new Analyzer(ctx.getEnv(), ctx).getExprRewriter());
-        Assert.assertTrue(stmt2.toSql().contains("WITH v1 AS (SELECT `t1`.`k1` FROM `db1`.`tbl1` t1),"
-                + "v2 AS (SELECT `t2`.`k1` FROM `db1`.`tbl1` t2)"));
-    }
-
-    @Test
-    public void testSelectOuterJoinSql() throws Exception {
-        ConnectContext ctx = UtFrameUtils.createDefaultCtx();
-        String sql1 = "select l.citycode, group_concat(distinct r.username) from db1.table1 l "
-                + "left join db1.table2 r on l.citycode=r.citycode group by l.citycode";
-        SelectStmt stmt1 = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql1, ctx);
-        Assert.assertFalse(stmt1.getAnalyzer().getSlotDesc(new SlotId(2)).getIsNullable());
-        Assert.assertFalse(stmt1.getAnalyzer().getSlotDescriptor("r.username").getIsNullable());
-        FunctionCallExpr expr = (FunctionCallExpr) stmt1.getSelectList().getItems().get(1).getExpr();
-        Assert.assertTrue(expr.getFnParams().isDistinct());
-    }
-
-    @Test
-    public void testHashBucketSelectTablet() throws Exception {
-        String sql1 = "SELECT * FROM db1.table1 TABLET(10031,10032,10033)";
-        OriginalPlanner planner = (OriginalPlanner) dorisAssert.query(sql1).internalExecuteOneAndGetPlan();
-        Set<Long> sampleTabletIds = ((OlapScanNode) planner.getScanNodes().get(0)).getSampleTabletIds();
-        Assert.assertTrue(sampleTabletIds.contains(10031L));
-        Assert.assertTrue(sampleTabletIds.contains(10032L));
-        Assert.assertTrue(sampleTabletIds.contains(10033L));
-    }
-
-    @Test
-    public void testRandomBucketSelectTablet() throws Exception {
-        String sql1 = "SELECT * FROM db1.table3 TABLET(10031,10032,10033)";
-        OriginalPlanner planner = (OriginalPlanner) dorisAssert.query(sql1).internalExecuteOneAndGetPlan();
-        Set<Long> sampleTabletIds = ((OlapScanNode) planner.getScanNodes().get(0)).getSampleTabletIds();
-        Assert.assertTrue(sampleTabletIds.contains(10031L));
-        Assert.assertTrue(sampleTabletIds.contains(10032L));
-        Assert.assertTrue(sampleTabletIds.contains(10033L));
-    }
-
-    @Test
-    public void testSelectSampleHashBucketTable() throws Exception {
-        FeConstants.runningUnitTest = true;
-        Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("db1");
-        OlapTable tbl = (OlapTable)
db.getTableOrMetaException("table1"); - long tabletId = 10031L; - for (Partition partition : tbl.getPartitions()) { - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(10000); - for (Tablet tablet : mIndex.getTablets()) { - tablet.setTabletId(tabletId); - tabletId += 1; - } - } - } - - // 1. TABLESAMPLE ROWS - String sql1 = "SELECT * FROM db1.table1 TABLESAMPLE(10 ROWS)"; - OriginalPlanner planner1 = (OriginalPlanner) dorisAssert.query(sql1).internalExecuteOneAndGetPlan(); - Set sampleTabletIds1 = ((OlapScanNode) planner1.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds1.size()); - - String sql2 = "SELECT * FROM db1.table1 TABLESAMPLE(1000 ROWS)"; - OriginalPlanner planner2 = (OriginalPlanner) dorisAssert.query(sql2).internalExecuteOneAndGetPlan(); - Set sampleTabletIds2 = ((OlapScanNode) planner2.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds2.size()); - - String sql3 = "SELECT * FROM db1.table1 TABLESAMPLE(1001 ROWS)"; - OriginalPlanner planner3 = (OriginalPlanner) dorisAssert.query(sql3).internalExecuteOneAndGetPlan(); - Set sampleTabletIds3 = ((OlapScanNode) planner3.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(2, sampleTabletIds3.size()); - - String sql4 = "SELECT * FROM db1.table1 TABLESAMPLE(9500 ROWS)"; - OriginalPlanner planner4 = (OriginalPlanner) dorisAssert.query(sql4).internalExecuteOneAndGetPlan(); - Set sampleTabletIds4 = ((OlapScanNode) planner4.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(10, sampleTabletIds4.size()); - - String sql5 = "SELECT * FROM db1.table1 TABLESAMPLE(11000 ROWS)"; - OriginalPlanner planner5 = (OriginalPlanner) dorisAssert.query(sql5).internalExecuteOneAndGetPlan(); - Set sampleTabletIds5 = ((OlapScanNode) planner5.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(0, sampleTabletIds5.size()); // no sample, all tablet - - String sql6 = "SELECT * FROM db1.table1 TABLET(10033) TABLESAMPLE(900 ROWS)"; - OriginalPlanner planner6 = (OriginalPlanner) dorisAssert.query(sql6).internalExecuteOneAndGetPlan(); - Set sampleTabletIds6 = ((OlapScanNode) planner6.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertTrue(sampleTabletIds6.size() >= 1 && sampleTabletIds6.size() <= 2); - Assert.assertTrue(sampleTabletIds6.contains(10033L)); - - // 2. 
TABLESAMPLE PERCENT - String sql7 = "SELECT * FROM db1.table1 TABLESAMPLE(10 PERCENT)"; - OriginalPlanner planner7 = (OriginalPlanner) dorisAssert.query(sql7).internalExecuteOneAndGetPlan(); - Set sampleTabletIds7 = ((OlapScanNode) planner7.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds7.size()); - - String sql8 = "SELECT * FROM db1.table1 TABLESAMPLE(15 PERCENT)"; - OriginalPlanner planner8 = (OriginalPlanner) dorisAssert.query(sql8).internalExecuteOneAndGetPlan(); - Set sampleTabletIds8 = ((OlapScanNode) planner8.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(2, sampleTabletIds8.size()); - - String sql9 = "SELECT * FROM db1.table1 TABLESAMPLE(100 PERCENT)"; - OriginalPlanner planner9 = (OriginalPlanner) dorisAssert.query(sql9).internalExecuteOneAndGetPlan(); - Set sampleTabletIds9 = ((OlapScanNode) planner9.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(0, sampleTabletIds9.size()); - - String sql10 = "SELECT * FROM db1.table1 TABLESAMPLE(110 PERCENT)"; - OriginalPlanner planner10 = (OriginalPlanner) dorisAssert.query(sql10).internalExecuteOneAndGetPlan(); - Set sampleTabletIds10 = ((OlapScanNode) planner10.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(0, sampleTabletIds10.size()); - - String sql11 = "SELECT * FROM db1.table1 TABLET(10033) TABLESAMPLE(5 PERCENT)"; - OriginalPlanner planner11 = (OriginalPlanner) dorisAssert.query(sql11).internalExecuteOneAndGetPlan(); - Set sampleTabletIds11 = ((OlapScanNode) planner11.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertTrue(sampleTabletIds11.size() >= 1 && sampleTabletIds11.size() <= 2); - Assert.assertTrue(sampleTabletIds11.contains(10033L)); - - // 3. TABLESAMPLE REPEATABLE - String sql12 = "SELECT * FROM db1.table1 TABLESAMPLE(900 ROWS)"; - OriginalPlanner planner12 = (OriginalPlanner) dorisAssert.query(sql12).internalExecuteOneAndGetPlan(); - Set sampleTabletIds12 = ((OlapScanNode) planner12.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds12.size()); - - String sql13 = "SELECT * FROM db1.table1 TABLESAMPLE(900 ROWS) REPEATABLE 2"; - OriginalPlanner planner13 = (OriginalPlanner) dorisAssert.query(sql13).internalExecuteOneAndGetPlan(); - Set sampleTabletIds13 = ((OlapScanNode) planner13.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds13.size()); - Assert.assertTrue(sampleTabletIds13.contains(10033L)); - - String sql14 = "SELECT * FROM db1.table1 TABLESAMPLE(900 ROWS) REPEATABLE 10"; - OriginalPlanner planner14 = (OriginalPlanner) dorisAssert.query(sql14).internalExecuteOneAndGetPlan(); - Set sampleTabletIds14 = ((OlapScanNode) planner14.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds14.size()); - Assert.assertTrue(sampleTabletIds14.contains(10031L)); - - String sql15 = "SELECT * FROM db1.table1 TABLESAMPLE(900 ROWS) REPEATABLE 0"; - OriginalPlanner planner15 = (OriginalPlanner) dorisAssert.query(sql15).internalExecuteOneAndGetPlan(); - Set sampleTabletIds15 = ((OlapScanNode) planner15.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds15.size()); - Assert.assertTrue(sampleTabletIds15.contains(10031L)); - - // 4. 
select returns 900 rows of results - String sql16 = "SELECT * FROM (SELECT * FROM db1.table1 TABLESAMPLE(900 ROWS) REPEATABLE 9999999 limit 900) t"; - OriginalPlanner planner16 = (OriginalPlanner) dorisAssert.query(sql16).internalExecuteOneAndGetPlan(); - Set sampleTabletIds16 = ((OlapScanNode) planner16.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds16.size()); - - String sql17 = "SELECT * FROM db1.table1 TABLESAMPLE(15 PERCENT) where siteid != 0"; - OriginalPlanner planner17 = (OriginalPlanner) dorisAssert.query(sql17).internalExecuteOneAndGetPlan(); - Set sampleTabletIds17 = ((OlapScanNode) planner17.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(2, sampleTabletIds17.size()); - FeConstants.runningUnitTest = false; - } - - @Test - public void testSelectSampleRandomBucketTable() throws Exception { - FeConstants.runningUnitTest = true; - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("db1"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("table3"); - long tabletId = 10031L; - for (Partition partition : tbl.getPartitions()) { - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(10000); - for (Tablet tablet : mIndex.getTablets()) { - tablet.setTabletId(tabletId); - tabletId += 1; - } - } - } - - // 1. TABLESAMPLE ROWS - String sql1 = "SELECT * FROM db1.table3 TABLESAMPLE(10 ROWS)"; - OriginalPlanner planner1 = (OriginalPlanner) dorisAssert.query(sql1).internalExecuteOneAndGetPlan(); - Set sampleTabletIds1 = ((OlapScanNode) planner1.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds1.size()); - - String sql2 = "SELECT * FROM db1.table3 TABLESAMPLE(1000 ROWS)"; - OriginalPlanner planner2 = (OriginalPlanner) dorisAssert.query(sql2).internalExecuteOneAndGetPlan(); - Set sampleTabletIds2 = ((OlapScanNode) planner2.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds2.size()); - - String sql3 = "SELECT * FROM db1.table3 TABLESAMPLE(1001 ROWS)"; - OriginalPlanner planner3 = (OriginalPlanner) dorisAssert.query(sql3).internalExecuteOneAndGetPlan(); - Set sampleTabletIds3 = ((OlapScanNode) planner3.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(2, sampleTabletIds3.size()); - - String sql4 = "SELECT * FROM db1.table3 TABLESAMPLE(9500 ROWS)"; - OriginalPlanner planner4 = (OriginalPlanner) dorisAssert.query(sql4).internalExecuteOneAndGetPlan(); - Set sampleTabletIds4 = ((OlapScanNode) planner4.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(10, sampleTabletIds4.size()); - - String sql5 = "SELECT * FROM db1.table3 TABLESAMPLE(11000 ROWS)"; - OriginalPlanner planner5 = (OriginalPlanner) dorisAssert.query(sql5).internalExecuteOneAndGetPlan(); - Set sampleTabletIds5 = ((OlapScanNode) planner5.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(0, sampleTabletIds5.size()); // no sample, all tablet - - String sql6 = "SELECT * FROM db1.table3 TABLET(10033) TABLESAMPLE(900 ROWS)"; - OriginalPlanner planner6 = (OriginalPlanner) dorisAssert.query(sql6).internalExecuteOneAndGetPlan(); - Set sampleTabletIds6 = ((OlapScanNode) planner6.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertTrue(sampleTabletIds6.size() >= 1 && sampleTabletIds6.size() <= 2); - Assert.assertTrue(sampleTabletIds6.contains(10033L)); - - // 2. 
TABLESAMPLE PERCENT - String sql7 = "SELECT * FROM db1.table3 TABLESAMPLE(10 PERCENT)"; - OriginalPlanner planner7 = (OriginalPlanner) dorisAssert.query(sql7).internalExecuteOneAndGetPlan(); - Set sampleTabletIds7 = ((OlapScanNode) planner7.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds7.size()); - - String sql8 = "SELECT * FROM db1.table3 TABLESAMPLE(15 PERCENT)"; - OriginalPlanner planner8 = (OriginalPlanner) dorisAssert.query(sql8).internalExecuteOneAndGetPlan(); - Set sampleTabletIds8 = ((OlapScanNode) planner8.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(2, sampleTabletIds8.size()); - - String sql9 = "SELECT * FROM db1.table3 TABLESAMPLE(100 PERCENT)"; - OriginalPlanner planner9 = (OriginalPlanner) dorisAssert.query(sql9).internalExecuteOneAndGetPlan(); - Set sampleTabletIds9 = ((OlapScanNode) planner9.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(0, sampleTabletIds9.size()); - - String sql10 = "SELECT * FROM db1.table3 TABLESAMPLE(110 PERCENT)"; - OriginalPlanner planner10 = (OriginalPlanner) dorisAssert.query(sql10).internalExecuteOneAndGetPlan(); - Set sampleTabletIds10 = ((OlapScanNode) planner10.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(0, sampleTabletIds10.size()); - - String sql11 = "SELECT * FROM db1.table3 TABLET(10033) TABLESAMPLE(5 PERCENT)"; - OriginalPlanner planner11 = (OriginalPlanner) dorisAssert.query(sql11).internalExecuteOneAndGetPlan(); - Set sampleTabletIds11 = ((OlapScanNode) planner11.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertTrue(sampleTabletIds11.size() >= 1 && sampleTabletIds11.size() <= 2); - Assert.assertTrue(sampleTabletIds11.contains(10033L)); - - // 3. TABLESAMPLE REPEATABLE - String sql12 = "SELECT * FROM db1.table3 TABLESAMPLE(900 ROWS)"; - OriginalPlanner planner12 = (OriginalPlanner) dorisAssert.query(sql12).internalExecuteOneAndGetPlan(); - Set sampleTabletIds12 = ((OlapScanNode) planner12.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds12.size()); - - String sql13 = "SELECT * FROM db1.table3 TABLESAMPLE(900 ROWS) REPEATABLE 2"; - OriginalPlanner planner13 = (OriginalPlanner) dorisAssert.query(sql13).internalExecuteOneAndGetPlan(); - Set sampleTabletIds13 = ((OlapScanNode) planner13.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds13.size()); - Assert.assertTrue(sampleTabletIds13.contains(10033L)); - - String sql14 = "SELECT * FROM db1.table3 TABLESAMPLE(900 ROWS) REPEATABLE 10"; - OriginalPlanner planner14 = (OriginalPlanner) dorisAssert.query(sql14).internalExecuteOneAndGetPlan(); - Set sampleTabletIds14 = ((OlapScanNode) planner14.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds14.size()); - Assert.assertTrue(sampleTabletIds14.contains(10031L)); - - String sql15 = "SELECT * FROM db1.table3 TABLESAMPLE(900 ROWS) REPEATABLE 0"; - OriginalPlanner planner15 = (OriginalPlanner) dorisAssert.query(sql15).internalExecuteOneAndGetPlan(); - Set sampleTabletIds15 = ((OlapScanNode) planner15.getScanNodes().get(0)).getSampleTabletIds(); - Assert.assertEquals(1, sampleTabletIds15.size()); - Assert.assertTrue(sampleTabletIds15.contains(10031L)); - - // 4. 
select returns 900 rows of results
-        String sql16 = "SELECT * FROM (SELECT * FROM db1.table3 TABLESAMPLE(900 ROWS) REPEATABLE 9999999 limit 900) t";
-        OriginalPlanner planner16 = (OriginalPlanner) dorisAssert.query(sql16).internalExecuteOneAndGetPlan();
-        Set<Long> sampleTabletIds16 = ((OlapScanNode) planner16.getScanNodes().get(0)).getSampleTabletIds();
-        Assert.assertEquals(1, sampleTabletIds16.size());
-        FeConstants.runningUnitTest = false;
-    }
-
-
-    @Test
-    public void testSelectExcept() throws Exception {
-        ConnectContext ctx = UtFrameUtils.createDefaultCtx();
-        String sql = "SELECT * EXCEPT (siteid) FROM db1.table1";
-        SelectStmt stmt = (SelectStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx);
-        Assert.assertFalse(stmt.getColLabels().contains("siteid"));
-        Assert.assertEquals(stmt.resultExprs.size(), 3);
-    }
-}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/SetVariableTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/SetVariableTest.java
index 703ba49c826352..1148f64de3fd10 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/analysis/SetVariableTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/SetVariableTest.java
@@ -58,7 +58,7 @@ public void testSqlMode() throws Exception {
         stmtExecutor.execute();
         Expr expr = stmtExecutor.getParsedStmt().getResultExprs().get(0);
         Assert.assertTrue(expr instanceof SlotRef);
-        Assert.assertSame(expr.getType(), Type.VARCHAR);
+        Assert.assertSame(Type.STRING, expr.getType());
     }
 
     @Test
diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java
deleted file mode 100644
index 7958994214702e..00000000000000
--- a/fe/fe-core/src/test/java/org/apache/doris/analysis/StmtRewriterTest.java
+++ /dev/null
@@ -1,627 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
- -package org.apache.doris.analysis; - -import org.apache.doris.common.FeConstants; -import org.apache.doris.utframe.DorisAssert; -import org.apache.doris.utframe.UtFrameUtils; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.UUID; - -public class StmtRewriterTest { - private static final Logger LOG = LogManager.getLogger(StmtRewriterTest.class); - - private static String baseDir = "fe"; - private static String runningDir = baseDir + "/mocked/StmtRewriterTest/" - + UUID.randomUUID().toString() + "/"; - private static final String TABLE_NAME = "table1"; - private static final String DB_NAME = "db1"; - private static DorisAssert dorisAssert; - - @BeforeClass - public static void beforeClass() throws Exception { - FeConstants.runningUnitTest = true; - UtFrameUtils.createDorisCluster(runningDir); - dorisAssert = new DorisAssert(); - dorisAssert.withDatabase(DB_NAME).useDatabase(DB_NAME); - String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME + " (empid int, name varchar, " - + "deptno int, salary int, commission int) " - + "distributed by hash(empid) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - } - - /** - * The whole query plan is following: - +-----------------------------------------+ - | Explain String | - +-----------------------------------------+ - | PLAN FRAGMENT 0 | - | OUTPUT EXPRS: | | - | PARTITION: UNPARTITIONED | - | | - | RESULT SINK | - | | - | 10:EXCHANGE | - | tuple ids: 1 5 | - | | - | PLAN FRAGMENT 1 | - | OUTPUT EXPRS: | - | PARTITION: HASH_PARTITIONED: | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 10 | - | UNPARTITIONED | - | | - | 4:CROSS JOIN | - | | cross join: | - | | predicates: > | - | | tuple ids: 1 5 | - | | | - | |----9:EXCHANGE | - | | tuple ids: 5 | - | | | - | 6:AGGREGATE (merge finalize) | - | | output: sum() | - | | group by: | - | | tuple ids: 1 | - | | | - | 5:EXCHANGE | - | tuple ids: 1 | - | | - | PLAN FRAGMENT 2 | - | OUTPUT EXPRS: | - | PARTITION: UNPARTITIONED | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 09 | - | UNPARTITIONED | - | | - | 8:AGGREGATE (merge finalize) | - | | output: avg() | - | | group by: | - | | tuple ids: 5 | - | | | - | 7:EXCHANGE | - | tuple ids: 4 | - | | - | PLAN FRAGMENT 3 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 07 | - | UNPARTITIONED | - | | - | 3:AGGREGATE (update serialize) | - | | output: avg(`salary`) | - | | group by: | - | | tuple ids: 4 | - | | | - | 2:OlapScanNode | - | TABLE: all_type_table | - | PREAGGREGATION: ON | - | rollup: all_type_table | - | tuple ids: 3 | - | | - | PLAN FRAGMENT 4 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 05 | - | HASH_PARTITIONED: | - | | - | 1:AGGREGATE (update serialize) | - | | STREAMING | - | | output: sum(`salary`) | - | | group by: `empid` | - | | tuple ids: 1 | - | | | - | 0:OlapScanNode | - | TABLE: all_type_table | - | PREAGGREGATION: ON | - | rollup: all_type_table | - | tuple ids: 0 | - +-----------------------------------------+ - * - * @throws Exception - */ - @Test - public void testRewriteHavingClauseSubqueries() throws Exception { - String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ empid, sum(salary) from " + TABLE_NAME + " group by empid having sum(salary) > (" - + subquery + ");"; - 
LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - dorisAssert.query(query).explainContains("CROSS JOIN"); - } - - /** - * +-----------------------------------------+ - | Explain String | - +-----------------------------------------+ - | PLAN FRAGMENT 0 | - | OUTPUT EXPRS: | | - | PARTITION: UNPARTITIONED | - | | - | RESULT SINK | - | | - | 11:MERGING-EXCHANGE | - | limit: 65535 | - | tuple ids: 7 | - | | - | PLAN FRAGMENT 1 | - | OUTPUT EXPRS: | - | PARTITION: HASH_PARTITIONED: | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 11 | - | UNPARTITIONED | - | | - | 5:TOP-N | - | | order by: ASC | - | | offset: 0 | - | | limit: 65535 | - | | tuple ids: 7 | - | | | - | 4:CROSS JOIN | - | | cross join: | - | | predicates: > | - | | tuple ids: 1 5 | - | | | - | |----10:EXCHANGE | - | | tuple ids: 5 | - | | | - | 7:AGGREGATE (merge finalize) | - | | output: sum() | - | | group by: | - | | tuple ids: 1 | - | | | - | 6:EXCHANGE | - | tuple ids: 1 | - | | - | PLAN FRAGMENT 2 | - | OUTPUT EXPRS: | - | PARTITION: UNPARTITIONED | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 10 | - | UNPARTITIONED | - | | - | 9:AGGREGATE (merge finalize) | - | | output: avg() | - | | group by: | - | | tuple ids: 5 | - | | | - | 8:EXCHANGE | - | tuple ids: 4 | - | | - | PLAN FRAGMENT 3 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 08 | - | UNPARTITIONED | - | | - | 3:AGGREGATE (update serialize) | - | | output: avg(`salary`) | - | | group by: | - | | tuple ids: 4 | - | | | - | 2:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 3 | - | | - | PLAN FRAGMENT 4 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 06 | - | HASH_PARTITIONED: | - | | - | 1:AGGREGATE (update serialize) | - | | STREAMING | - | | output: sum(`salary`) | - | | group by: `empid` | - | | tuple ids: 1 | - | | | - | 0:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 0 | - +-----------------------------------------+ - * @throws Exception - */ - @Test - public void testRewriteHavingClauseWithOrderBy() throws Exception { - String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ empid a, sum(salary) from " + TABLE_NAME + " group by empid having sum(salary) > (" - + subquery + ") order by a;"; - LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - dorisAssert.query(query).explainContains("CROSS JOIN", - "order by: `$a$1`.`$c$1` ASC"); - } - - /** - * +-----------------------------------------+ - | Explain String | - +-----------------------------------------+ - | PLAN FRAGMENT 0 | - | OUTPUT EXPRS: | - | PARTITION: UNPARTITIONED | - | | - | RESULT SINK | - | | - | 11:MERGING-EXCHANGE | - | limit: 65535 | - | tuple ids: 7 | - | | - | PLAN FRAGMENT 1 | - | OUTPUT EXPRS: | - | PARTITION: HASH_PARTITIONED: | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 11 | - | UNPARTITIONED | - | | - | 5:TOP-N | - | | order by: ASC | - | | offset: 0 | - | | limit: 65535 | - | | tuple ids: 7 | - | | | - | 4:CROSS JOIN | - | | cross join: | - | | predicates: > | - | | tuple ids: 1 5 | - | | | - | |----10:EXCHANGE | - | | tuple ids: 5 | - | | | - | 7:AGGREGATE (merge finalize) | - | | output: sum() | - | | group by: | - | | tuple ids: 1 | - | | | - | 6:EXCHANGE | - | tuple ids: 1 | - | | - | PLAN FRAGMENT 2 | - | OUTPUT EXPRS: | - | PARTITION: UNPARTITIONED | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 
10 | - | UNPARTITIONED | - | | - | 9:AGGREGATE (merge finalize) | - | | output: avg() | - | | group by: | - | | tuple ids: 5 | - | | | - | 8:EXCHANGE | - | tuple ids: 4 | - | | - | PLAN FRAGMENT 3 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 08 | - | UNPARTITIONED | - | | - | 3:AGGREGATE (update serialize) | - | | output: avg(`salary`) | - | | group by: | - | | tuple ids: 4 | - | | | - | 2:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 3 | - | | - | PLAN FRAGMENT 4 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 06 | - | HASH_PARTITIONED: | - | | - | 1:AGGREGATE (update serialize) | - | | STREAMING | - | | output: sum(`salary`) | - | | group by: `empid` | - | | tuple ids: 1 | - | | | - | 0:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 0 | - +-----------------------------------------+ - * @throws Exception - */ - @Test - public void testRewriteHavingClauseMissingAggregationColumn() throws Exception { - String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ empid a from " + TABLE_NAME + " group by empid having sum(salary) > (" - + subquery + ") order by sum(salary);"; - LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - dorisAssert.query(query).explainContains("group by: `empid`", - "CROSS JOIN", - "order by: `$a$1`.`$c$2` ASC", - "OUTPUT EXPRS:\n `$a$1`.`$c$1`"); - } - - /** - +-----------------------------------------+ - | Explain String | - +-----------------------------------------+ - | PLAN FRAGMENT 0 | - | OUTPUT EXPRS: | | - | PARTITION: UNPARTITIONED | - | | - | RESULT SINK | - | | - | 11:MERGING-EXCHANGE | - | limit: 65535 | - | tuple ids: 7 | - | | - | PLAN FRAGMENT 1 | - | OUTPUT EXPRS: | - | PARTITION: HASH_PARTITIONED: | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 11 | - | UNPARTITIONED | - | | - | 5:TOP-N | - | | order by: ASC | - | | offset: 0 | - | | limit: 65535 | - | | tuple ids: 7 | - | | | - | 4:CROSS JOIN | - | | cross join: | - | | predicates: > | - | | tuple ids: 1 5 | - | | | - | |----10:EXCHANGE | - | | tuple ids: 5 | - | | | - | 7:AGGREGATE (merge finalize) | - | | output: sum() | - | | group by: | - | | tuple ids: 1 | - | | | - | 6:EXCHANGE | - | tuple ids: 1 | - | | - | PLAN FRAGMENT 2 | - | OUTPUT EXPRS: | - | PARTITION: UNPARTITIONED | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 10 | - | UNPARTITIONED | - | | - | 9:AGGREGATE (merge finalize) | - | | output: avg() | - | | group by: | - | | tuple ids: 5 | - | | | - | 8:EXCHANGE | - | tuple ids: 4 | - | | - | PLAN FRAGMENT 3 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 08 | - | UNPARTITIONED | - | | - | 3:AGGREGATE (update serialize) | - | | output: avg(`salary`) | - | | group by: | - | | tuple ids: 4 | - | | | - | 2:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 3 | - | | - | PLAN FRAGMENT 4 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 06 | - | HASH_PARTITIONED: | - | | - | 1:AGGREGATE (update serialize) | - | | STREAMING | - | | output: sum(`salary`) | - | | group by: `empid` | - | | tuple ids: 1 | - | | | - | 0:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 0 | - +-----------------------------------------+ - 106 rows in set (0.02 sec) - * 
@throws Exception - */ - @Test - public void testRewriteHavingClauseWithAlias() throws Exception { - String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" - + subquery + ") order by b;"; - LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - dorisAssert.query(query).explainContains("group by: `empid`", - "CROSS JOIN", - "order by: `$a$1`.`$c$2` ASC", - "OUTPUT EXPRS:\n `$a$1`.`$c$1`\n `$a$1`.`$c$2`"); - } - - /** - +-----------------------------------------+ - | Explain String | - +-----------------------------------------+ - | PLAN FRAGMENT 0 | - | OUTPUT EXPRS: | | - | PARTITION: UNPARTITIONED | - | | - | RESULT SINK | - | | - | 11:MERGING-EXCHANGE | - | limit: 100 | - | tuple ids: 7 | - | | - | PLAN FRAGMENT 1 | - | OUTPUT EXPRS: | - | PARTITION: HASH_PARTITIONED: | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 11 | - | UNPARTITIONED | - | | - | 5:TOP-N | - | | order by: ASC | - | | offset: 0 | - | | limit: 100 | - | | tuple ids: 7 | - | | | - | 4:CROSS JOIN | - | | cross join: | - | | predicates: > | - | | tuple ids: 1 5 | - | | | - | |----10:EXCHANGE | - | | tuple ids: 5 | - | | | - | 7:AGGREGATE (merge finalize) | - | | output: sum() | - | | group by: | - | | tuple ids: 1 | - | | | - | 6:EXCHANGE | - | tuple ids: 1 | - | | - | PLAN FRAGMENT 2 | - | OUTPUT EXPRS: | - | PARTITION: UNPARTITIONED | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 10 | - | UNPARTITIONED | - | | - | 9:AGGREGATE (merge finalize) | - | | output: avg() | - | | group by: | - | | tuple ids: 5 | - | | | - | 8:EXCHANGE | - | tuple ids: 4 | - | | - | PLAN FRAGMENT 3 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 08 | - | UNPARTITIONED | - | | - | 3:AGGREGATE (update serialize) | - | | output: avg(`salary`) | - | | group by: | - | | tuple ids: 4 | - | | | - | 2:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 3 | - | | - | PLAN FRAGMENT 4 | - | OUTPUT EXPRS: | - | PARTITION: RANDOM | - | | - | STREAM DATA SINK | - | EXCHANGE ID: 06 | - | HASH_PARTITIONED: | - | | - | 1:AGGREGATE (update serialize) | - | | STREAMING | - | | output: sum(`salary`) | - | | group by: `empid` | - | | tuple ids: 1 | - | | | - | 0:OlapScanNode | - | TABLE: table1 | - | PREAGGREGATION: ON | - | rollup: table1 | - | tuple ids: 0 | - +-----------------------------------------+ - * @throws Exception - */ - @Test - public void testRewriteHavingClausewWithLimit() throws Exception { - String subquery = "select avg(salary) from " + TABLE_NAME; - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" - + subquery + ") order by b limit 100;"; - LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - dorisAssert.query(query).explainContains("group by: `empid`", - "CROSS JOIN", - "order by: `$a$1`.`$c$2` ASC", - "OUTPUT EXPRS:\n `$a$1`.`$c$1`\n `$a$1`.`$c$2`"); - } - - /** - * ISSUE-3205 - */ - @Test - public void testRewriteHavingClauseWithBetweenAndInSubquery() throws Exception { - String subquery = "select avg(salary) from " + TABLE_NAME + " where empid between 1 and 2"; - String query = - "select /*+ SET_VAR(enable_nereids_planner=false) */ empid a, sum(salary) b from " + TABLE_NAME + " group by a having b > (" + subquery + ");"; - LOG.info("EXPLAIN:{}", dorisAssert.query(query).explainQuery()); - 
dorisAssert.query(query) - .explainContains("CROSS JOIN"); - } - - @AfterClass - public static void afterClass() throws Exception { - UtFrameUtils.cleanDorisFeDir(baseDir); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java index 2c8625ae0e228d..e1bf796104bbc9 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/TableNameComparedLowercaseTest.java @@ -18,7 +18,6 @@ package org.apache.doris.analysis; import org.apache.doris.catalog.Env; -import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.qe.GlobalVariable; import org.apache.doris.utframe.DorisAssert; @@ -88,22 +87,6 @@ public void testTableNameLowerCasTe() { Assert.assertTrue(tableNames.contains("TABLE2")); } - @Test - public void testQueryTableNameCaseInsensitive() throws Exception { - String sql1 = "select /*+ SET_VAR(enable_nereids_planner=false) */ Table1.siteid, Table2.k2 from Table1 join Table2 on Table1.siteid = Table2.k1" - + " where Table2.k5 > 1000 order by Table1.siteid"; - dorisAssert.query(sql1).explainQuery(); - - String sql2 = "select /*+ SET_VAR(enable_nereids_planner=false) */ Table1.siteid, Table2.k2 from table1 join table2 on TAble1.siteid = TAble2.k1" - + " where TABle2.k5 > 1000 order by TABLe1.siteid"; - try { - dorisAssert.query(sql2).explainQuery(); - Assert.fail("Different references to the same table name are used: 'table1', 'TAble1'"); - } catch (AnalysisException e) { - System.out.println(e.getMessage()); - } - } - @Test public void testCreateSameTableFailed() { String table2 = "create table db1.TABle2(k1 int, k2 varchar(32), k3 varchar(32)) " diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java index 17a4b5fe04a730..7368e6fb592427 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateFunctionTest.java @@ -23,7 +23,6 @@ import org.apache.doris.analysis.Expr; import org.apache.doris.analysis.FunctionCallExpr; import org.apache.doris.analysis.StringLiteral; -import org.apache.doris.common.Config; import org.apache.doris.common.FeConstants; import org.apache.doris.common.jmockit.Deencapsulation; import org.apache.doris.planner.PlanFragment; @@ -61,7 +60,6 @@ public static void setup() throws Exception { FeConstants.runningUnitTest = true; // create connect context connectContext = UtFrameUtils.createDefaultCtx(); - connectContext.getSessionVariable().setEnableNereidsPlanner(false); } @AfterClass @@ -73,8 +71,6 @@ public static void teardown() { @Test public void test() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - ctx.getSessionVariable().setEnableNereidsPlanner(false); - ctx.getSessionVariable().setEnableFoldConstantByBe(false); // create database db1 createDatabase(ctx, "create database db1;"); @@ -114,102 +110,13 @@ public void test() throws Exception { queryStr = "select db1.id_masking(k1) from db1.tbl1"; Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "concat(left(CAST(`k1` AS VARCHAR(65533)), 3), '****', right(CAST(`k1` AS VARCHAR(65533)), 4))")); - - // create alias function with cast - // cast any type to decimal with specific precision 
and scale - createFuncStr = "create alias function db1.decimal(all, int, int) with parameter(col, precision, scale)" - + " as cast(col as decimal(precision, scale));"; - createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); - Env.getCurrentEnv().createFunction(createFunctionStmt); - - functions = db.getFunctions(); - Assert.assertEquals(2, functions.size()); - - queryStr = "select db1.decimal(333, 4, 1);"; - ctx.getState().reset(); - stmtExecutor = new StmtExecutor(ctx, queryStr); - stmtExecutor.execute(); - Assert.assertNotEquals(QueryState.MysqlStateType.ERR, ctx.getState().getStateType()); - planner = stmtExecutor.planner(); - Assert.assertEquals(1, planner.getFragments().size()); - fragment = planner.getFragments().get(0); - Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode) fragment.getPlanRoot(); - constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); - System.out.println(constExprLists.get(0).get(0)); - Assert.assertTrue(constExprLists.get(0).get(0) instanceof StringLiteral); - - queryStr = "select db1.decimal(k3, 4, 1) from db1.tbl1;"; - if (Config.enable_decimal_conversion) { - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k3` AS decimalv3(4,1))")); - } else { - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k3` AS decimal(4,1))")); - } - - // cast any type to varchar with fixed length - createFuncStr = "create alias function db1.varchar(all, int) with parameter(text, length) as " - + "cast(text as varchar(length));"; - createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); - Env.getCurrentEnv().createFunction(createFunctionStmt); - - functions = db.getFunctions(); - Assert.assertEquals(3, functions.size()); - - queryStr = "select db1.varchar(333, 4);"; - ctx.getState().reset(); - stmtExecutor = new StmtExecutor(ctx, queryStr); - stmtExecutor.execute(); - Assert.assertNotEquals(QueryState.MysqlStateType.ERR, ctx.getState().getStateType()); - planner = stmtExecutor.planner(); - Assert.assertEquals(1, planner.getFragments().size()); - fragment = planner.getFragments().get(0); - Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode) fragment.getPlanRoot(); - constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); - Assert.assertEquals(1, constExprLists.size()); - Assert.assertEquals(1, constExprLists.get(0).size()); - Assert.assertTrue(constExprLists.get(0).get(0) instanceof StringLiteral); - - queryStr = "select db1.varchar(k1, 4) from db1.tbl1;"; - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k1` AS varchar(65533))")); - - // cast any type to char with fixed length - createFuncStr = "create alias function db1.to_char(all, int) with parameter(text, length) as " - + "cast(text as char(length));"; - createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); - Env.getCurrentEnv().createFunction(createFunctionStmt); - - functions = db.getFunctions(); - Assert.assertEquals(4, functions.size()); - - queryStr = "select db1.to_char(333, 4);"; - ctx.getState().reset(); - stmtExecutor = new StmtExecutor(ctx, queryStr); - stmtExecutor.execute(); - Assert.assertNotEquals(QueryState.MysqlStateType.ERR, ctx.getState().getStateType()); - planner = stmtExecutor.planner(); - Assert.assertEquals(1, planner.getFragments().size()); - fragment = 
planner.getFragments().get(0); - Assert.assertTrue(fragment.getPlanRoot() instanceof UnionNode); - unionNode = (UnionNode) fragment.getPlanRoot(); - constExprLists = Deencapsulation.getField(unionNode, "constExprLists"); - Assert.assertEquals(1, constExprLists.size()); - Assert.assertEquals(1, constExprLists.get(0).size()); - Assert.assertTrue(constExprLists.get(0).get(0) instanceof StringLiteral); - - queryStr = "select db1.to_char(k1, 4) from db1.tbl1;"; - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k1` AS character")); + "concat(left(CAST(CAST(k1 as BIGINT) AS VARCHAR(65533)), 3), '****'," + + " right(CAST(CAST(k1 AS BIGINT) AS VARCHAR(65533)), 4))")); } @Test public void testCreateGlobalFunction() throws Exception { ConnectContext ctx = UtFrameUtils.createDefaultCtx(); - ctx.getSessionVariable().setEnableNereidsPlanner(false); ctx.getSessionVariable().setEnableFoldConstantByBe(false); // 1. create database db2 @@ -240,61 +147,8 @@ public void testCreateGlobalFunction() throws Exception { queryStr = "select id_masking(k1) from db2.tbl1"; Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "concat(left(CAST(`k1` AS varchar(65533)), 3), '****', right(CAST(`k1` AS varchar(65533)), 4))")); - - // 4. create alias function with cast - // cast any type to decimal with specific precision and scale - createFuncStr = "create global alias function decimal(all, int, int) with parameter(col, precision, scale)" - + " as cast(col as decimal(precision, scale));"; - createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); - Env.getCurrentEnv().createFunction(createFunctionStmt); - - functions = Env.getCurrentEnv().getGlobalFunctionMgr().getFunctions(); - Assert.assertEquals(2, functions.size()); - - queryStr = "select decimal(333, 4, 1);"; - testFunctionQuery(ctx, queryStr, true); - - queryStr = "select decimal(k3, 4, 1) from db2.tbl1;"; - if (Config.enable_decimal_conversion) { - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k3` AS decimalv3(4,1))")); - } else { - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k3` AS decimal(4,1))")); - } - - // 5. cast any type to varchar with fixed length - createFuncStr = "create global alias function db2.varchar(all, int) with parameter(text, length) as " - + "cast(text as varchar(length));"; - createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); - Env.getCurrentEnv().createFunction(createFunctionStmt); - - functions = Env.getCurrentEnv().getGlobalFunctionMgr().getFunctions(); - Assert.assertEquals(3, functions.size()); - - queryStr = "select varchar(333, 4);"; - testFunctionQuery(ctx, queryStr, true); - - queryStr = "select varchar(k1, 4) from db2.tbl1;"; - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k1` AS varchar(65533))")); - - // 6. 
cast any type to char with fixed length - createFuncStr = "create global alias function db2.to_char(all, int) with parameter(text, length) as " - + "cast(text as char(length));"; - createFunctionStmt = (CreateFunctionStmt) UtFrameUtils.parseAndAnalyzeStmt(createFuncStr, ctx); - Env.getCurrentEnv().createFunction(createFunctionStmt); - - functions = Env.getCurrentEnv().getGlobalFunctionMgr().getFunctions(); - Assert.assertEquals(4, functions.size()); - - queryStr = "select to_char(333, 4);"; - testFunctionQuery(ctx, queryStr, true); - - queryStr = "select to_char(k1, 4) from db2.tbl1;"; - Assert.assertTrue(containsIgnoreCase(dorisAssert.query(queryStr).explainQuery(), - "CAST(`k1` AS character(255))")); + "concat(left(CAST(CAST(k1 as BIGINT) AS VARCHAR(65533)), 3), '****'," + + " right(CAST(CAST(k1 AS BIGINT) AS VARCHAR(65533)), 4))")); } private void testFunctionQuery(ConnectContext ctx, String queryStr, Boolean isStringLiteral) throws Exception { diff --git a/fe/fe-core/src/test/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgrTest.java index f62078c3050e16..2522e2487ac349 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgrTest.java @@ -195,7 +195,7 @@ public Cloud.CommitTxnResponse commitTxn(Cloud.CommitTxnRequest request) { Table testTable1 = masterEnv.getInternalCatalog().getDbOrMetaException(CatalogTestUtil.testDbId1) .getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), - transactionId, null); + transactionId, null, null); } @Test @@ -220,7 +220,7 @@ public Cloud.CommitTxnResponse commitTxn(Cloud.CommitTxnRequest request) { Table testTable1 = masterEnv.getInternalCatalog().getDbOrMetaException(CatalogTestUtil.testDbId1) .getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), - transactionId, null); + transactionId, null, null); } @Test @@ -247,7 +247,7 @@ public Cloud.CommitTxnResponse commitTxn(Cloud.CommitTxnRequest request) { Table testTable1 = masterEnv.getInternalCatalog().getDbOrMetaException(CatalogTestUtil.testDbId1) .getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), - transactionId, null); + transactionId, null, null); }); } @@ -279,7 +279,7 @@ public Cloud.CommitTxnResponse commitTxn(Cloud.CommitTxnRequest request) { Table testTable1 = masterEnv.getInternalCatalog().getDbOrMetaException(CatalogTestUtil.testDbId1) .getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), - transactionId, null); + transactionId, null, null); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveMetaStoreCacheTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveMetaStoreCacheTest.java new file mode 100644 index 00000000000000..607fc3b65394be --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveMetaStoreCacheTest.java @@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.datasource.hive; + +import org.apache.doris.common.ThreadPoolManager; + +import com.github.benmanes.caffeine.cache.LoadingCache; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.concurrent.ThreadPoolExecutor; + +public class HiveMetaStoreCacheTest { + + @Test + public void testInvalidateTableCache() { + ThreadPoolExecutor executor = ThreadPoolManager.newDaemonFixedThreadPool( + 1, 1, "refresh", 1, false); + ThreadPoolExecutor listExecutor = ThreadPoolManager.newDaemonFixedThreadPool( + 1, 1, "file", 1, false); + + HiveMetaStoreCache hiveMetaStoreCache = new HiveMetaStoreCache( + new HMSExternalCatalog(1L, "catalog", null, new HashMap<>(), null), executor, listExecutor); + + LoadingCache fileCache = hiveMetaStoreCache.getFileCacheRef().get(); + LoadingCache partitionCache = hiveMetaStoreCache.getPartitionCache(); + LoadingCache partitionValuesCache = hiveMetaStoreCache.getPartitionValuesCache(); + + String dbName = "db"; + String tbName = "tb"; + String tbName2 = "tb2"; + + putCache(fileCache, partitionCache, partitionValuesCache, dbName, tbName); + Assertions.assertEquals(2, fileCache.asMap().size()); + Assertions.assertEquals(1, partitionCache.asMap().size()); + Assertions.assertEquals(1, partitionValuesCache.asMap().size()); + + putCache(fileCache, partitionCache, partitionValuesCache, dbName, tbName2); + Assertions.assertEquals(4, fileCache.asMap().size()); + Assertions.assertEquals(2, partitionCache.asMap().size()); + Assertions.assertEquals(2, partitionValuesCache.asMap().size()); + + hiveMetaStoreCache.invalidateTableCache(dbName, tbName2); + Assertions.assertEquals(2, fileCache.asMap().size()); + Assertions.assertEquals(1, partitionCache.asMap().size()); + Assertions.assertEquals(1, partitionValuesCache.asMap().size()); + + hiveMetaStoreCache.invalidateTableCache(dbName, tbName); + Assertions.assertEquals(0, fileCache.asMap().size()); + Assertions.assertEquals(0, partitionCache.asMap().size()); + Assertions.assertEquals(0, partitionValuesCache.asMap().size()); + } + + private void putCache( + LoadingCache fileCache, + LoadingCache partitionCache, + LoadingCache partitionValuesCache, + String dbName, String tbName) { + HiveMetaStoreCache.FileCacheKey fileCacheKey1 = new HiveMetaStoreCache.FileCacheKey(dbName, tbName, tbName, "", new ArrayList<>(), null); + HiveMetaStoreCache.FileCacheKey fileCacheKey2 = HiveMetaStoreCache.FileCacheKey.createDummyCacheKey(dbName, tbName, tbName, "", null); + fileCache.put(fileCacheKey1, new HiveMetaStoreCache.FileCacheValue()); + fileCache.put(fileCacheKey2, new HiveMetaStoreCache.FileCacheValue()); + + HiveMetaStoreCache.PartitionCacheKey partitionCacheKey = new HiveMetaStoreCache.PartitionCacheKey( + dbName, + tbName, + new 
ArrayList<>() + ); + partitionCache.put(partitionCacheKey, new HivePartition(dbName, tbName, false, "", "", new ArrayList<>(), new HashMap<>())); + + HiveMetaStoreCache.PartitionValueCacheKey partitionValueCacheKey = new HiveMetaStoreCache.PartitionValueCacheKey(dbName, tbName, new ArrayList<>()); + partitionValuesCache.put(partitionValueCacheKey, new HiveMetaStoreCache.HivePartitionValues()); + + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/external/hms/HmsCatalogTest.java b/fe/fe-core/src/test/java/org/apache/doris/external/hms/HmsCatalogTest.java index e6281fd8f2fa20..fb6ac9859341d8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/external/hms/HmsCatalogTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/external/hms/HmsCatalogTest.java @@ -338,7 +338,6 @@ private void createDbAndTableForHmsCatalog(HMSExternalCatalog hmsCatalog) { public void testQueryView() { SessionVariable sv = connectContext.getSessionVariable(); Assertions.assertNotNull(sv); - sv.setEnableNereidsPlanner(true); createDbAndTableForHmsCatalog((HMSExternalCatalog) env.getCatalogMgr().getCatalog(HMS_CATALOG)); queryViews(false); diff --git a/fe/fe-core/src/test/java/org/apache/doris/insertoverwrite/InsertOverwriteManagerTest.java b/fe/fe-core/src/test/java/org/apache/doris/insertoverwrite/InsertOverwriteManagerTest.java new file mode 100644 index 00000000000000..4bf6c9f12d564b --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/insertoverwrite/InsertOverwriteManagerTest.java @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.insertoverwrite; + +import org.apache.doris.catalog.DatabaseIf; +import org.apache.doris.catalog.TableIf; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.MetaNotFoundException; + +import mockit.Expectations; +import mockit.Mocked; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class InsertOverwriteManagerTest { + @Mocked + private DatabaseIf db; + + @Mocked + private TableIf table; + + @Before + public void setUp() + throws NoSuchMethodException, SecurityException, AnalysisException, DdlException, MetaNotFoundException { + + new Expectations() { + { + db.getId(); + minTimes = 0; + result = 1L; + + db.getFullName(); + minTimes = 0; + result = "db1"; + + table.getId(); + minTimes = 0; + result = 2L; + + table.getName(); + minTimes = 0; + result = "table1"; + } + }; + } + + @Test + public void testParallel() { + InsertOverwriteManager manager = new InsertOverwriteManager(); + manager.recordRunningTableOrException(db, table); + try { + manager.recordRunningTableOrException(db, table); + } catch (Exception e) { + Assert.assertTrue(e.getMessage().contains("Not allowed")); + } + manager.dropRunningRecord(db.getId(), table.getId()); + manager.recordRunningTableOrException(db, table); + } + +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java index c18682bdc3c60c..20cb626ff37055 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/KafkaRoutineLoadJobTest.java @@ -225,7 +225,7 @@ public void testProcessTimeOutTasks(@Injectable GlobalTransactionMgr globalTrans Map partitionIdsToOffset = Maps.newHashMap(); partitionIdsToOffset.put(100, 0L); KafkaTaskInfo kafkaTaskInfo = new KafkaTaskInfo(new UUID(1, 1), 1L, - maxBatchIntervalS * 2 * 1000, 0, partitionIdsToOffset, false); + maxBatchIntervalS * 2 * 1000, partitionIdsToOffset, false); kafkaTaskInfo.setExecuteStartTimeMs(System.currentTimeMillis() - maxBatchIntervalS * 2 * 1000 - 1); routineLoadTaskInfoList.add(kafkaTaskInfo); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java index 863cc6807c7574..6f3dd2eaaa851a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java @@ -290,7 +290,6 @@ public List> getRealOffsets(String brokerList, String topic, }; RoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob(); - Deencapsulation.setField(routineLoadJob, "state", RoutineLoadJob.JobState.RUNNING); Deencapsulation.setField(routineLoadJob, "progress", kafkaProgress); routineLoadJob.update(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadTaskSchedulerTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadTaskSchedulerTest.java index 1548017b66115b..95c2423de71fa9 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadTaskSchedulerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadTaskSchedulerTest.java @@ -69,7 +69,7 @@ public void testRunOneCycle(@Injectable KafkaRoutineLoadJob 
kafkaRoutineLoadJob1 Deencapsulation.setField(kafkaProgress, "partitionIdToOffset", partitionIdToOffset); LinkedBlockingDeque routineLoadTaskInfoQueue = new LinkedBlockingDeque<>(); - KafkaTaskInfo routineLoadTaskInfo1 = new KafkaTaskInfo(new UUID(1, 1), 1L, 20000, 0, + KafkaTaskInfo routineLoadTaskInfo1 = new KafkaTaskInfo(new UUID(1, 1), 1L, 20000, partitionIdToOffset, false); routineLoadTaskInfoQueue.addFirst(routineLoadTaskInfo1); diff --git a/fe/fe-core/src/test/java/org/apache/doris/mysql/privilege/AuthTest.java b/fe/fe-core/src/test/java/org/apache/doris/mysql/privilege/AuthTest.java index 1c7c2a6c655bff..e7f81c31a64363 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/mysql/privilege/AuthTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/mysql/privilege/AuthTest.java @@ -2478,7 +2478,7 @@ public void testShowRoles() { String name = row.get(0); if (role.equals(name)) { findWgPriv = true; - String wgPriv = row.get(row.size() - 1); + String wgPriv = row.get(row.size() - 2); Assert.assertTrue("test_wg: Usage_priv".equals(wgPriv)); } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/memo/GroupExpressionTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/memo/GroupExpressionTest.java new file mode 100644 index 00000000000000..903eaaefefb9fa --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/memo/GroupExpressionTest.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.nereids.memo; + +import org.apache.doris.nereids.cost.Cost; +import org.apache.doris.nereids.properties.PhysicalProperties; +import org.apache.doris.nereids.trees.plans.FakePlan; +import org.apache.doris.qe.SessionVariable; + +import com.google.common.collect.Lists; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class GroupExpressionTest { + + @Test + public void testMergeToNotOwnerRemoveWhenTargetWithLowerCost() { + GroupExpression source = new GroupExpression(new FakePlan()); + source.updateLowestCostTable(PhysicalProperties.GATHER, Lists.newArrayList(), Cost.infinite(new SessionVariable())); + source.putOutputPropertiesMap(PhysicalProperties.GATHER, PhysicalProperties.ANY); + + GroupExpression target = new GroupExpression(new FakePlan()); + target.updateLowestCostTable(PhysicalProperties.ANY, Lists.newArrayList(), Cost.zero(new SessionVariable())); + target.putOutputPropertiesMap(PhysicalProperties.ANY, PhysicalProperties.ANY); + + source.mergeToNotOwnerRemove(target); + Assertions.assertTrue(target.getLowestCostTable().containsKey(PhysicalProperties.ANY)); + Assertions.assertTrue(target.getLowestCostTable().containsKey(PhysicalProperties.GATHER)); + Assertions.assertEquals(PhysicalProperties.ANY, target.getOutputProperties(PhysicalProperties.ANY)); + } + + @Test + public void testMergeToNotOwnerRemoveWhenSourceWithLowerCost() { + GroupExpression source = new GroupExpression(new FakePlan()); + source.updateLowestCostTable(PhysicalProperties.GATHER, Lists.newArrayList(), Cost.zero(new SessionVariable())); + source.putOutputPropertiesMap(PhysicalProperties.GATHER, PhysicalProperties.ANY); + + GroupExpression target = new GroupExpression(new FakePlan()); + target.updateLowestCostTable(PhysicalProperties.ANY, Lists.newArrayList(), Cost.infinite(new SessionVariable())); + target.putOutputPropertiesMap(PhysicalProperties.ANY, PhysicalProperties.ANY); + + source.mergeToNotOwnerRemove(target); + Assertions.assertTrue(target.getLowestCostTable().containsKey(PhysicalProperties.ANY)); + Assertions.assertTrue(target.getLowestCostTable().containsKey(PhysicalProperties.GATHER)); + Assertions.assertEquals(PhysicalProperties.GATHER, target.getOutputProperties(PhysicalProperties.ANY)); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/metrics/EventTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/metrics/EventTest.java index 631022d1759d03..b3566ed00d2e24 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/metrics/EventTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/metrics/EventTest.java @@ -48,7 +48,6 @@ public void write(int b) { @Override public void runBeforeAll() { connectContext.getSessionVariable().setEnableNereidsTrace(true); - connectContext.getSessionVariable().setEnableNereidsPlanner(true); channel = new EventChannel() .addConsumers( new PrintConsumer(CounterEvent.class, printStream), diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/mv/MtmvCacheNewConnectContextTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/mv/MtmvCacheNewConnectContextTest.java new file mode 100644 index 00000000000000..0134d5df4e7166 --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/mv/MtmvCacheNewConnectContextTest.java @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.mv; + +import org.apache.doris.catalog.MTMV; +import org.apache.doris.mtmv.MTMVRelationManager; +import org.apache.doris.nereids.CascadesContext; +import org.apache.doris.nereids.sqltest.SqlTestBase; +import org.apache.doris.nereids.util.PlanChecker; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.SessionVariable; + +import mockit.Mock; +import mockit.MockUp; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.BitSet; + +/** + * The connectContext would new instance when generate MTMVCache, after generate, the connectContext should + * reset the connectContext by earlier connectContext to avoid slot id error + * The test is for this. + * */ +public class MtmvCacheNewConnectContextTest extends SqlTestBase { + + @Test + void testConnectContextIsCorrect() throws Exception { + ConnectContext tmp = ConnectContext.get(); + connectContext.getSessionVariable().setDisableNereidsRules("PRUNE_EMPTY_PARTITION"); + BitSet disableNereidsRules = connectContext.getSessionVariable().getDisableNereidsRules(); + new MockUp() { + @Mock + public BitSet getDisableNereidsRules() { + return disableNereidsRules; + } + }; + new MockUp() { + @Mock + public boolean isMVPartitionValid(MTMV mtmv, ConnectContext ctx, boolean forceConsistent) { + return true; + } + }; + connectContext.getSessionVariable().enableMaterializedViewRewrite = true; + connectContext.getSessionVariable().enableMaterializedViewNestRewrite = true; + + createMvByNereids("create materialized view mv1 BUILD IMMEDIATE REFRESH COMPLETE ON MANUAL\n" + + " DISTRIBUTED BY RANDOM BUCKETS 1\n" + + " PROPERTIES ('replication_num' = '1') \n" + + " as select T1.id from T1 inner join T2 " + + " on T1.id = T2.id;"); + CascadesContext c1 = createCascadesContext( + "select T1.id from T1 inner join T2 " + + "on T1.id = T2.id " + + "inner join T3 on T1.id = T3.id", + connectContext + ); + PlanChecker.from(c1) + .analyze() + .rewrite() + .optimize() + .printlnBestPlanTree(); + + ConnectContext now = ConnectContext.get(); + // The connectContext should not change + Assertions.assertSame(tmp, now); + dropMvByNereids("drop materialized view mv1"); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/preprocess/SelectHintTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/preprocess/SelectHintTest.java deleted file mode 100644 index 03b7e8dc366644..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/preprocess/SelectHintTest.java +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.nereids.preprocess; - -import org.apache.doris.catalog.Env; -import org.apache.doris.nereids.NereidsPlanner; -import org.apache.doris.nereids.StatementContext; -import org.apache.doris.nereids.exceptions.MustFallbackException; -import org.apache.doris.nereids.parser.NereidsParser; -import org.apache.doris.nereids.properties.PhysicalProperties; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.OriginStatement; -import org.apache.doris.qe.SessionVariable; -import org.apache.doris.qe.StmtExecutor; -import org.apache.doris.thrift.TUniqueId; - -import mockit.Expectations; -import mockit.Mock; -import mockit.MockUp; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; - -public class SelectHintTest { - - @BeforeAll - public static void init() { - ConnectContext ctx = new ConnectContext(); - new MockUp() { - @Mock - public ConnectContext get() { - return ctx; - } - }; - new MockUp() { - @Mock - public boolean isMaster() { - return true; - } - }; - } - - @Test - public void testFallbackToOriginalPlanner() throws Exception { - String sql = " SELECT /*+ SET_VAR(enable_nereids_planner=\"false\") */ 1"; - - ConnectContext ctx = new ConnectContext(); - ctx.setEnv(Env.getCurrentEnv()); - StatementContext statementContext = new StatementContext(ctx, new OriginStatement(sql, 0)); - SessionVariable sv = ctx.getSessionVariable(); - Assertions.assertNotNull(sv); - sv.setEnableNereidsPlanner(true); - Assertions.assertThrows(MustFallbackException.class, () -> new NereidsPlanner(statementContext) - .planWithLock(new NereidsParser().parseSingle(sql), PhysicalProperties.ANY)); - - // manually recover sv - sv.setEnableNereidsPlanner(true); - StmtExecutor stmtExecutor = new StmtExecutor(ctx, sql); - - new Expectations(stmtExecutor) { - { - stmtExecutor.executeByLegacy((TUniqueId) any); - } - }; - - stmtExecutor.execute(); - - Assertions.assertTrue(sv.isEnableNereidsPlanner()); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/CustomAccessControllerFactory.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/CustomAccessControllerFactory.java new file mode 100644 index 00000000000000..f30ab8def4fae8 --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/CustomAccessControllerFactory.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.privileges; + +import org.apache.doris.mysql.privilege.AccessControllerFactory; +import org.apache.doris.mysql.privilege.CatalogAccessController; + +import java.util.Map; + +public class CustomAccessControllerFactory implements AccessControllerFactory { + @Override + public String factoryIdentifier() { + return "CustomAccess"; + } + + @Override + public CatalogAccessController createAccessController(Map prop) { + return new TestCheckPrivileges.SimpleCatalogAccessController(); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/TestCheckPrivileges.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/TestCheckPrivileges.java index 5ad41d7e6b3be0..2b2f878c7f3751 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/TestCheckPrivileges.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/privileges/TestCheckPrivileges.java @@ -23,10 +23,10 @@ import org.apache.doris.catalog.Env; import org.apache.doris.catalog.PrimitiveType; import org.apache.doris.common.AuthorizationException; +import org.apache.doris.common.DdlException; import org.apache.doris.common.FeConstants; import org.apache.doris.datasource.CatalogMgr; import org.apache.doris.datasource.test.TestExternalCatalog.TestCatalogProvider; -import org.apache.doris.mysql.privilege.AccessControllerFactory; import org.apache.doris.mysql.privilege.AccessControllerManager; import org.apache.doris.mysql.privilege.CatalogAccessController; import org.apache.doris.mysql.privilege.DataMaskPolicy; @@ -92,10 +92,20 @@ public void testPrivilegesAndPolicies() throws Exception { String catalogProvider = "org.apache.doris.nereids.privileges.TestCheckPrivileges$CustomCatalogProvider"; String accessControllerFactory - = "org.apache.doris.nereids.privileges.TestCheckPrivileges$CustomAccessControllerFactory"; - + = "org.apache.doris.nereids.privileges.CustomAccessControllerFactory"; String catalog = "custom_catalog"; String db = "test_db"; + String failedAccessControllerFactory + = "org.apache.doris.nereids.privileges.FailedAccessControllerFactory"; + //try to create catalog with failed access controller + Assertions.assertThrows(DdlException.class, () -> { + createCatalog("create catalog " + catalog + " properties(" + + " \"type\"=\"test\"," + + " \"catalog_provider.class\"=\"" + catalogProvider + "\"," + + " \"" + CatalogMgr.ACCESS_CONTROLLER_CLASS_PROP + "\"=\"" + failedAccessControllerFactory + "\"" + + ")"); + }, "Failed to init access controller"); + createCatalog("create catalog " + catalog + " properties(" + " \"type\"=\"test\"," + " \"catalog_provider.class\"=\"" + catalogProvider + "\"," @@ -314,13 +324,6 @@ public Map>> getMetadata() { } } - public static class CustomAccessControllerFactory implements AccessControllerFactory { - @Override - public CatalogAccessController createAccessController(Map prop) { - return new SimpleCatalogAccessController(); - } - } - public static class SimpleCatalogAccessController implements CatalogAccessController { private static ThreadLocal> tablePrivileges = new ThreadLocal<>(); private static 
ThreadLocal> columnPrivileges = new ThreadLocal<>(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/CheckRowPolicyTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/CheckRowPolicyTest.java index b807bbbbc7a4bd..4c24c789b12373 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/CheckRowPolicyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/CheckRowPolicyTest.java @@ -196,7 +196,6 @@ public void checkOnePolicy() throws Exception { LogicalRelation relation = new LogicalOlapScan(StatementScopeIdGenerator.newRelationId(), olapTable, Arrays.asList(fullDbName)); LogicalCheckPolicy checkPolicy = new LogicalCheckPolicy<>(relation); - connectContext.getSessionVariable().setEnableNereidsPlanner(true); createPolicy("CREATE ROW POLICY " + policyName + " ON " @@ -226,7 +225,6 @@ public void checkOnePolicyRandomDist() throws Exception { ImmutableList.of(tableNameRanddomDist)), connectContext, new BindRelation()); LogicalCheckPolicy checkPolicy = new LogicalCheckPolicy(plan); - connectContext.getSessionVariable().setEnableNereidsPlanner(true); createPolicy("CREATE ROW POLICY " + policyName + " ON " diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtilsTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtilsTest.java index b44e1cc3ec681a..ccc759dff3de94 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtilsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtilsTest.java @@ -251,6 +251,26 @@ protected void runBeforeAll() throws Exception { connectContext.getSessionVariable().setDisableNereidsRules("OLAP_SCAN_PARTITION_PRUNE,PRUNE_EMPTY_PARTITION"); } + // Test when join both side are all partition table and partition column name is same + @Test + public void joinPartitionNameSameTest() { + PlanChecker.from(connectContext) + .checkExplain("select t1.upgrade_day, t2.batch_no, count(*) " + + "from test2 t2 join test1 t1 on " + + "t1.upgrade_day = t2.upgrade_day " + + "group by t1.upgrade_day, t2.batch_no;", + nereidsPlanner -> { + Plan rewrittenPlan = nereidsPlanner.getRewrittenPlan(); + RelatedTableInfo relatedTableInfo = + MaterializedViewUtils.getRelatedTableInfo("upgrade_day", null, + rewrittenPlan, nereidsPlanner.getCascadesContext()); + checkRelatedTableInfo(relatedTableInfo, + "test1", + "upgrade_day", + true); + }); + } + @Test public void getRelatedTableInfoWhenAutoPartitionTest() { PlanChecker.from(connectContext) diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/FoldConstantTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/FoldConstantTest.java index 4a5a5e9065c2b7..bd26306c2a066a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/FoldConstantTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/FoldConstantTest.java @@ -23,6 +23,7 @@ import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.parser.NereidsParser; import org.apache.doris.nereids.rules.analysis.ExpressionAnalyzer; +import org.apache.doris.nereids.rules.expression.rules.FoldConstantRule; import org.apache.doris.nereids.rules.expression.rules.FoldConstantRuleOnFE; import org.apache.doris.nereids.trees.expressions.Cast; import 
org.apache.doris.nereids.trees.expressions.Expression; @@ -754,6 +755,21 @@ void testFoldTypeOfNullLiteral() { Assertions.assertTrue(e1.getDataType() instanceof VarcharType); } + @Test + void testFoldNvl() { + executor = new ExpressionRuleExecutor(ImmutableList.of( + ExpressionAnalyzer.FUNCTION_ANALYZER_RULE, + bottomUp( + FoldConstantRule.INSTANCE + ) + )); + + assertRewriteExpression("nvl(NULL, 1)", "1"); + assertRewriteExpression("nvl(NULL, NULL)", "NULL"); + assertRewriteAfterTypeCoercion("nvl(IA, NULL)", "ifnull(IA, NULL)"); + assertRewriteAfterTypeCoercion("nvl(IA, 1)", "ifnull(IA, 1)"); + } + private void assertRewriteExpression(String actualExpression, String expectedExpression) { ExpressionRewriteContext context = new ExpressionRewriteContext( MemoTestUtils.createCascadesContext(new UnboundRelation(new RelationId(1), ImmutableList.of("test_table")))); diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/AdjustNullableTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/AdjustNullableTest.java new file mode 100644 index 00000000000000..023f9c4f7ff33a --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/AdjustNullableTest.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.rules.rewrite; + +import org.apache.doris.nereids.trees.expressions.ExprId; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.GreaterThan; +import org.apache.doris.nereids.trees.expressions.Slot; +import org.apache.doris.nereids.trees.expressions.SlotReference; +import org.apache.doris.nereids.trees.expressions.literal.Literal; +import org.apache.doris.nereids.trees.plans.RelationId; +import org.apache.doris.nereids.trees.plans.logical.LogicalJdbcScan; +import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan; +import org.apache.doris.nereids.types.IntegerType; +import org.apache.doris.nereids.util.MemoPatternMatchSupported; +import org.apache.doris.nereids.util.PlanConstructor; + +import mockit.Mock; +import mockit.MockUp; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; + +/** + * Tests for {@link AdjustNullableTest}. 
+ */ +class AdjustNullableTest implements MemoPatternMatchSupported { + private final LogicalOlapScan scan = PlanConstructor.newLogicalOlapScan(0, "t1", 0); + + @Test + void testLogicalExternalRelation() { + new MockUp() { + @Mock + public Set getOutputSet() { + Set output = new HashSet<>(); + output.add(new SlotReference(new ExprId(1), "id", IntegerType.INSTANCE, false, + new ArrayList<>())); + return output; + } + }; + + GreaterThan gt = new GreaterThan(new SlotReference(new ExprId(1), "id", + IntegerType.INSTANCE, true, new ArrayList<>()), Literal.of("1")); + Set conjuncts = new HashSet<>(); + conjuncts.add(gt); + Assertions.assertTrue(conjuncts.iterator().next().nullable()); + LogicalJdbcScan jdbcScan = + new LogicalJdbcScan(new RelationId(1), PlanConstructor.newOlapTable(0, "t1", 0), + new ArrayList<>(), Optional.empty(), Optional.empty(), conjuncts); + AdjustNullable adjustNullable = new AdjustNullable(); + LogicalJdbcScan newJdbcScan = (LogicalJdbcScan) adjustNullable.rewriteRoot(jdbcScan, null); + conjuncts = newJdbcScan.getConjuncts(); + Assertions.assertFalse(conjuncts.iterator().next().nullable()); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/FilterEstimationTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/FilterEstimationTest.java index 6e76c3f6a33d1c..15f2f4d7e9c123 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/FilterEstimationTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/FilterEstimationTest.java @@ -70,10 +70,10 @@ public void testOrNaN() { LessThan lessThan = new LessThan(b, int100); Or or = new Or(greaterThan1, lessThan); Map columnStat = new HashMap<>(); - ColumnStatistic aStats = new ColumnStatisticBuilder().setCount(500).setNdv(500).setAvgSizeByte(4) + ColumnStatistic aStats = new ColumnStatisticBuilder(500).setNdv(500).setAvgSizeByte(4) .setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).build(); - ColumnStatistic bStats = new ColumnStatisticBuilder().setCount(500).setNdv(500).setAvgSizeByte(4) + ColumnStatistic bStats = new ColumnStatisticBuilder(500).setNdv(500).setAvgSizeByte(4) .setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).setIsUnknown(true).build(); columnStat.put(a, aStats); @@ -99,10 +99,10 @@ public void testAndNaN() { LessThan lessThan = new LessThan(b, int100); And and = new And(greaterThan1, lessThan); Map columnStat = new HashMap<>(); - ColumnStatistic aStats = new ColumnStatisticBuilder().setCount(500).setNdv(500) + ColumnStatistic aStats = new ColumnStatisticBuilder(500).setNdv(500) .setAvgSizeByte(4).setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).build(); - ColumnStatistic bStats = new ColumnStatisticBuilder().setCount(500).setNdv(500) + ColumnStatistic bStats = new ColumnStatisticBuilder(500).setNdv(500) .setAvgSizeByte(4).setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).setIsUnknown(true).build(); columnStat.put(a, aStats); @@ -165,7 +165,7 @@ public void testRelatedAnd() { LessThan le = new LessThan(a, int200); And and = new And(ge, le); Map slotToColumnStat = new HashMap<>(); - ColumnStatistic aStats = new ColumnStatisticBuilder().setCount(300).setNdv(30) + ColumnStatistic aStats = new ColumnStatisticBuilder(300).setNdv(30) .setAvgSizeByte(4).setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(300).build(); slotToColumnStat.put(a, aStats); @@ -184,8 +184,7 @@ public void knownEqualToUnknown() { SlotReference ym = new 
SlotReference("a", new VarcharType(7)); double rowCount = 404962.0; double ndv = 14.0; - ColumnStatistic ymStats = new ColumnStatisticBuilder() - .setCount(rowCount) + ColumnStatistic ymStats = new ColumnStatisticBuilder(rowCount) .setNdv(ndv) .setMinExpr(new StringLiteral("2023-07")) .setMinValue(14126741000630328.000000) @@ -211,8 +210,7 @@ public void knownEqualToUnknownWithLittleNdv() { SlotReference ym = new SlotReference("a", new VarcharType(7)); double rowCount = 404962.0; double ndv = 0.5; - ColumnStatistic ymStats = new ColumnStatisticBuilder() - .setCount(rowCount) + ColumnStatistic ymStats = new ColumnStatisticBuilder(rowCount) .setNdv(ndv) .setMinExpr(new StringLiteral("2023-07")) .setMinValue(14126741000630328.000000) @@ -267,13 +265,13 @@ public void test1() { And and = new And(greaterThan1, lessThan); Or or = new Or(and, equalTo); Map slotToColumnStat = new HashMap<>(); - ColumnStatistic aStats = new ColumnStatisticBuilder().setCount(500).setNdv(500) + ColumnStatistic aStats = new ColumnStatisticBuilder(500).setNdv(500) .setAvgSizeByte(4).setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).build(); - ColumnStatistic bStats = new ColumnStatisticBuilder().setCount(500).setNdv(500) + ColumnStatistic bStats = new ColumnStatisticBuilder(500).setNdv(500) .setAvgSizeByte(4).setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).build(); - ColumnStatistic cStats = new ColumnStatisticBuilder().setCount(500).setNdv(500) + ColumnStatistic cStats = new ColumnStatisticBuilder(500).setNdv(500) .setAvgSizeByte(4).setNumNulls(0).setDataSize(0) .setMinValue(0).setMaxValue(1000).setMinExpr(null).build(); slotToColumnStat.put(a, aStats); @@ -312,7 +310,7 @@ public void test2() { FilterEstimation filterEstimation = new FilterEstimation(); Statistics expected = filterEstimation.estimate(or, stat); Assertions.assertTrue( - Precision.equals(512.5, + Precision.equals(503.12, expected.getRowCount(), 0.01)); } @@ -334,7 +332,7 @@ public void test3() { Statistics stat = new Statistics(1000, slotToColumnStat); FilterEstimation filterEstimation = new FilterEstimation(); Statistics expected = filterEstimation.estimate(ge, stat); - Assertions.assertEquals(1000 * 1.0 / 500, expected.getRowCount()); + Assertions.assertEquals(1000 * (500.0 / 1000) * (1.0 / 500), expected.getRowCount()); } // a <= 500 @@ -355,7 +353,7 @@ public void test4() { Statistics stat = new Statistics(1000, slotToColumnStat); FilterEstimation filterEstimation = new FilterEstimation(); Statistics expected = filterEstimation.estimate(le, stat); - Assertions.assertEquals(1000 * 1.0 / 500, expected.getRowCount()); + Assertions.assertEquals(1000 * (500.0 / 1000) * (1.0 / 500), expected.getRowCount()); } // a < 500 @@ -376,7 +374,7 @@ public void test5() { Statistics stat = new Statistics(1000, slotToColumnStat); FilterEstimation filterEstimation = new FilterEstimation(); Statistics expected = filterEstimation.estimate(less, stat); - Assertions.assertEquals(2, expected.getRowCount()); + Assertions.assertEquals(1, expected.getRowCount()); } // a > 1000 @@ -397,7 +395,7 @@ public void test6() { Statistics stat = new Statistics(1000, slotToColumnStat); FilterEstimation filterEstimation = new FilterEstimation(); Statistics expected = filterEstimation.estimate(ge, stat); - Assertions.assertEquals(2, expected.getRowCount()); + Assertions.assertEquals(1, expected.getRowCount()); } // a > b @@ -659,27 +657,24 @@ public void testFilterOutofMinMax() { IntegerLiteral i300 = new IntegerLiteral(300); 
GreaterThan ge = new GreaterThan(c, i300); Map slotToColumnStat = new HashMap<>(); - ColumnStatisticBuilder builderA = new ColumnStatisticBuilder() + ColumnStatisticBuilder builderA = new ColumnStatisticBuilder(1000) .setNdv(1000) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(1000) - .setMaxValue(10000) - .setCount(1000); - ColumnStatisticBuilder builderB = new ColumnStatisticBuilder() + .setMaxValue(10000); + ColumnStatisticBuilder builderB = new ColumnStatisticBuilder(1000) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(0) - .setMaxValue(500) - .setCount(1000); - ColumnStatisticBuilder builderC = new ColumnStatisticBuilder() + .setMaxValue(500); + ColumnStatisticBuilder builderC = new ColumnStatisticBuilder(1000) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(0) - .setMaxValue(200) - .setCount(1000); + .setMaxValue(200); slotToColumnStat.put(a, builderA.build()); slotToColumnStat.put(b, builderB.build()); slotToColumnStat.put(c, builderC.build()); @@ -799,22 +794,19 @@ public void testInPredicateEstimationForColumnsOutofRange() { IntegerLiteral i200 = new IntegerLiteral(200); Map slotToColumnStat = new HashMap<>(); - ColumnStatisticBuilder builderA = new ColumnStatisticBuilder() + ColumnStatisticBuilder builderA = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(0) - .setMaxValue(100) - .setCount(100); - ColumnStatisticBuilder builderB = new ColumnStatisticBuilder() - .setCount(100) + .setMaxValue(100); + ColumnStatisticBuilder builderB = new ColumnStatisticBuilder(100) .setNdv(20) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(0) .setMaxValue(500); - ColumnStatisticBuilder builderC = new ColumnStatisticBuilder() - .setCount(100) + ColumnStatisticBuilder builderC = new ColumnStatisticBuilder(100) .setNdv(40) .setAvgSizeByte(4) .setNumNulls(0) @@ -868,22 +860,19 @@ public void testFilterEstimationForColumnsNotChanged() { IntegerLiteral i10 = new IntegerLiteral(10); Map slotToColumnStat = new HashMap<>(); - ColumnStatisticBuilder builderA = new ColumnStatisticBuilder() + ColumnStatisticBuilder builderA = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(0) - .setMaxValue(100) - .setCount(100); - ColumnStatisticBuilder builderB = new ColumnStatisticBuilder() - .setCount(100) + .setMaxValue(100); + ColumnStatisticBuilder builderB = new ColumnStatisticBuilder(100) .setNdv(20) .setAvgSizeByte(4) .setNumNulls(0) .setMinValue(0) .setMaxValue(500); - ColumnStatisticBuilder builderC = new ColumnStatisticBuilder() - .setCount(100) + ColumnStatisticBuilder builderC = new ColumnStatisticBuilder(100) .setNdv(40) .setAvgSizeByte(4) .setNumNulls(0) @@ -914,15 +903,14 @@ public void testFilterEstimationForColumnsNotChanged() { @Test public void testBetweenCastFilter() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(0) .setMaxExpr(new IntLiteral(100)) .setMaxValue(100) .setMinExpr(new IntLiteral(0)) - .setMinValue(0) - .setCount(100); + .setMinValue(0); DoubleLiteral begin = new DoubleLiteral(40.0); DoubleLiteral end = new DoubleLiteral(50.0); LessThan less = new LessThan(new Cast(a, DoubleType.INSTANCE), end); @@ -943,13 +931,12 @@ public void testDateRangeSelectivity() { DateLiteral from = new DateLiteral("1990-01-01"); DateLiteral to = new DateLiteral("2000-01-01"); SlotReference a = new 
SlotReference("a", DateType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(0) .setMaxValue(to.getDouble()) - .setMinValue(from.getDouble()) - .setCount(100); + .setMinValue(from.getDouble()); DateLiteral mid = new DateLiteral("1999-01-01"); GreaterThan greaterThan = new GreaterThan(a, mid); Statistics stats = new Statistics(100, new HashMap<>()); @@ -962,13 +949,12 @@ public void testDateRangeSelectivity() { @Test public void testIsNull() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(10) .setMaxValue(100) - .setMinValue(0) - .setCount(100); + .setMinValue(0); IsNull isNull = new IsNull(a); Statistics stats = new Statistics(100, new HashMap<>()); stats.addColumnStats(a, builder.build()); @@ -980,13 +966,12 @@ public void testIsNull() { @Test public void testIsNotNull() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(4) .setNumNulls(10) .setMaxValue(100) - .setMinValue(0) - .setCount(100); + .setMinValue(0); IsNull isNull = new IsNull(a); Not not = new Not(isNull); Statistics stats = new Statistics(100, new HashMap<>()); @@ -1002,13 +987,12 @@ public void testIsNotNull() { @Test public void testNumNullsEqualTo() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); EqualTo equalTo = new EqualTo(a, int1); Statistics stats = new Statistics(10, new HashMap<>()); @@ -1024,13 +1008,12 @@ public void testNumNullsEqualTo() { @Test public void testNumNullsComparable() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); GreaterThan greaterThan = new GreaterThan(a, int1); Statistics stats = new Statistics(10, new HashMap<>()); @@ -1046,13 +1029,12 @@ public void testNumNullsComparable() { @Test public void testNumNullsIn() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); IntegerLiteral int2 = new IntegerLiteral(2); InPredicate in = new InPredicate(a, Lists.newArrayList(int1, int2)); @@ -1060,7 +1042,7 @@ public void testNumNullsIn() { stats.addColumnStats(a, builder.build()); FilterEstimation filterEstimation = new FilterEstimation(); Statistics result = filterEstimation.estimate(in, stats); - Assertions.assertEquals(result.getRowCount(), 10.0, 0.01); + Assertions.assertEquals(result.getRowCount(), 
2.0, 0.01); } /** @@ -1069,13 +1051,12 @@ public void testNumNullsIn() { @Test public void testNumNullsNotEqualTo() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); EqualTo equalTo = new EqualTo(a, int1); Not not = new Not(equalTo); @@ -1092,13 +1073,12 @@ public void testNumNullsNotEqualTo() { @Test public void testNumNullsNotIn() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); IntegerLiteral int2 = new IntegerLiteral(2); InPredicate in = new InPredicate(a, Lists.newArrayList(int1, int2)); @@ -1116,13 +1096,12 @@ public void testNumNullsNotIn() { @Test public void testNumNullsAnd() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); IntegerLiteral int2 = new IntegerLiteral(2); GreaterThanEqual greaterThanEqual = new GreaterThanEqual(a, int1); @@ -1141,23 +1120,21 @@ public void testNumNullsAnd() { @Test public void testNumNullsAndTwoCol() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builderA = new ColumnStatisticBuilder() + ColumnStatisticBuilder builderA = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(0) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); EqualTo equalTo = new EqualTo(a, int1); SlotReference b = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builderB = new ColumnStatisticBuilder() + ColumnStatisticBuilder builderB = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); Not isNotNull = new Not(new IsNull(b)); And and = new And(equalTo, isNotNull); Statistics stats = new Statistics(10, new HashMap<>()); @@ -1174,13 +1151,12 @@ public void testNumNullsAndTwoCol() { @Test public void testNumNullsOr() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); IntegerLiteral int2 = new IntegerLiteral(2); GreaterThanEqual greaterThanEqual = new GreaterThanEqual(a, int2); @@ -1199,13 +1175,12 @@ public void testNumNullsOr() { @Test public void testNumNullsOrIsNull() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - ColumnStatisticBuilder builder = new ColumnStatisticBuilder() + ColumnStatisticBuilder builder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - 
.setMinValue(1) - .setCount(10); + .setMinValue(1); IntegerLiteral int1 = new IntegerLiteral(1); GreaterThanEqual greaterThanEqual = new GreaterThanEqual(a, int1); IsNull isNull = new IsNull(a); @@ -1219,23 +1194,22 @@ public void testNumNullsOrIsNull() { @Test public void testNullSafeEqual() { - ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(10) .setNdv(2) .setAvgSizeByte(4) .setNumNulls(8) .setMaxValue(2) - .setMinValue(1) - .setCount(10); + .setMinValue(1); ColumnStatistic aStats = columnStatisticBuilder.build(); SlotReference a = new SlotReference("a", IntegerType.INSTANCE); - columnStatisticBuilder.setNdv(2) + ColumnStatisticBuilder columnStatisticBuilder2 = new ColumnStatisticBuilder(10) + .setNdv(2) .setAvgSizeByte(4) .setNumNulls(7) .setMaxValue(2) - .setMinValue(1) - .setCount(10); - ColumnStatistic bStats = columnStatisticBuilder.build(); + .setMinValue(1); + ColumnStatistic bStats = columnStatisticBuilder2.build(); SlotReference b = new SlotReference("b", IntegerType.INSTANCE); StatisticsBuilder statsBuilder = new StatisticsBuilder(); @@ -1258,15 +1232,14 @@ public void testNullSafeEqual() { @Test public void testStringRangeColToLiteral() { SlotReference a = new SlotReference("a", new VarcharType(25)); - ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(25) .setNumNulls(0) .setMaxExpr(new StringLiteral("200")) .setMaxValue(new VarcharLiteral("200").getDouble()) .setMinExpr(new StringLiteral("100")) - .setMinValue(new VarcharLiteral("100").getDouble()) - .setCount(100); + .setMinValue(new VarcharLiteral("100").getDouble()); StatisticsBuilder statsBuilder = new StatisticsBuilder(); statsBuilder.setRowCount(100); statsBuilder.putColumnStatistics(a, columnStatisticBuilder.build()); @@ -1287,15 +1260,14 @@ public void testStringRangeColToLiteral() { @Test public void testStringRangeColToDateLiteral() { SlotReference a = new SlotReference("a", new VarcharType(25)); - ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilder = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(25) .setNumNulls(0) .setMaxExpr(new StringLiteral("2022-01-01")) .setMaxValue(new VarcharLiteral("2022-01-01").getDouble()) .setMinExpr(new StringLiteral("2020-01-01")) - .setMinValue(new VarcharLiteral("2020-01-01").getDouble()) - .setCount(100); + .setMinValue(new VarcharLiteral("2020-01-01").getDouble()); StatisticsBuilder statsBuilder = new StatisticsBuilder(); statsBuilder.setRowCount(100); statsBuilder.putColumnStatistics(a, columnStatisticBuilder.build()); @@ -1316,37 +1288,34 @@ public void testStringRangeColToDateLiteral() { @Test public void testStringRangeColToCol() { SlotReference a = new SlotReference("a", new VarcharType(25)); - ColumnStatisticBuilder columnStatisticBuilderA = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilderA = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(25) .setNumNulls(0) .setMaxExpr(new StringLiteral("2022-01-01")) .setMaxValue(new VarcharLiteral("2022-01-01").getDouble()) .setMinExpr(new StringLiteral("2020-01-01")) - .setMinValue(new VarcharLiteral("2020-01-01").getDouble()) - .setCount(100); + .setMinValue(new VarcharLiteral("2020-01-01").getDouble()); SlotReference b = new SlotReference("b", new VarcharType(25)); - 
ColumnStatisticBuilder columnStatisticBuilderB = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilderB = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(25) .setNumNulls(0) .setMaxExpr(new StringLiteral("2012-01-01")) .setMaxValue(new VarcharLiteral("2012-01-01").getDouble()) .setMinExpr(new StringLiteral("2010-01-01")) - .setMinValue(new VarcharLiteral("2010-01-01").getDouble()) - .setCount(100); + .setMinValue(new VarcharLiteral("2010-01-01").getDouble()); SlotReference c = new SlotReference("c", new VarcharType(25)); - ColumnStatisticBuilder columnStatisticBuilderC = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilderC = new ColumnStatisticBuilder(100) .setNdv(100) .setAvgSizeByte(25) .setNumNulls(0) .setMaxExpr(new StringLiteral("2021-01-01")) .setMaxValue(new VarcharLiteral("2021-01-01").getDouble()) .setMinExpr(new StringLiteral("2010-01-01")) - .setMinValue(new VarcharLiteral("2010-01-01").getDouble()) - .setCount(100); + .setMinValue(new VarcharLiteral("2010-01-01").getDouble()); StatisticsBuilder statsBuilder = new StatisticsBuilder(); statsBuilder.setRowCount(100); @@ -1372,9 +1341,8 @@ public void testLargeRange() { SlotReference a = new SlotReference("a", IntegerType.INSTANCE); long tenB = 1000000000; long row = 1600000000; - ColumnStatistic colStats = new ColumnStatisticBuilder() + ColumnStatistic colStats = new ColumnStatisticBuilder(row) .setAvgSizeByte(10) - .setCount(row) .setNdv(10000) .setMinExpr(new IntLiteral(0)) .setMinValue(0) @@ -1399,18 +1367,16 @@ public void testLargeRange() { void testAndWithInfinity() { Double row = 1000.0; SlotReference a = new SlotReference("a", new VarcharType(25)); - ColumnStatisticBuilder columnStatisticBuilderA = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilderA = new ColumnStatisticBuilder(row) .setNdv(10) .setAvgSizeByte(4) - .setNumNulls(0) - .setCount(row); + .setNumNulls(0); SlotReference b = new SlotReference("b", IntegerType.INSTANCE); - ColumnStatisticBuilder columnStatisticBuilderB = new ColumnStatisticBuilder() + ColumnStatisticBuilder columnStatisticBuilderB = new ColumnStatisticBuilder(row) .setNdv(488) .setAvgSizeByte(25) - .setNumNulls(0) - .setCount(row); + .setNumNulls(0); StatisticsBuilder statsBuilder = new StatisticsBuilder(); statsBuilder.setRowCount(row); statsBuilder.putColumnStatistics(a, columnStatisticBuilderA.build()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/JoinEstimateTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/JoinEstimateTest.java index 168650c6351239..8e37234a0c6567 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/JoinEstimateTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/JoinEstimateTest.java @@ -55,15 +55,13 @@ public void testInnerJoinStats() { EqualTo eq = new EqualTo(a, b); Statistics leftStats = new StatisticsBuilder().setRowCount(100).build(); leftStats.addColumnStats(a, - new ColumnStatisticBuilder() - .setCount(100) + new ColumnStatisticBuilder(100) .setNdv(10) .build() ); Statistics rightStats = new StatisticsBuilder().setRowCount(80).build(); rightStats.addColumnStats(b, - new ColumnStatisticBuilder() - .setCount(80) + new ColumnStatisticBuilder(80) .setNdv(5) .build() ); @@ -101,20 +99,17 @@ public void testOuterJoinStats() { EqualTo eq = new EqualTo(a, b); Statistics leftStats = new StatisticsBuilder().setRowCount(100).build(); leftStats.addColumnStats(a, - new ColumnStatisticBuilder() - .setCount(100) + new 
ColumnStatisticBuilder(100) .setNdv(10) .build() ); Statistics rightStats = new StatisticsBuilder().setRowCount(80).build(); rightStats.addColumnStats(b, - new ColumnStatisticBuilder() - .setCount(80) + new ColumnStatisticBuilder(80) .setNdv(0) .build() ).addColumnStats(c, - new ColumnStatisticBuilder() - .setCount(80) + new ColumnStatisticBuilder(80) .setNdv(20) .build() ); diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/StatsCalculatorTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/StatsCalculatorTest.java index 84c162ac9cfbe8..9fc1d3e1a22077 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/StatsCalculatorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/stats/StatsCalculatorTest.java @@ -145,13 +145,17 @@ public void testFilter() { GroupExpression groupExpression = new GroupExpression(logicalFilter, ImmutableList.of(childGroup)); Group ownerGroup = new Group(null, groupExpression, null); StatsCalculator.estimate(groupExpression, null); - Assertions.assertEquals((10000 * 0.1 * 0.05), ownerGroup.getStatistics().getRowCount(), 0.001); + // consider the nonNullSelectivity + // TODO: current normalization of numNulls by ratio will be refined in the future + Assertions.assertEquals(49.90005, ownerGroup.getStatistics().getRowCount(), 0.001); LogicalFilter logicalFilterOr = new LogicalFilter<>(or, groupPlan); GroupExpression groupExpressionOr = new GroupExpression(logicalFilterOr, ImmutableList.of(childGroup)); Group ownerGroupOr = new Group(null, groupExpressionOr, null); StatsCalculator.estimate(groupExpressionOr, null); - Assertions.assertEquals((long) (10000 * (0.1 + 0.05 - 0.1 * 0.05)), + // consider the nonNullSelectivity + // TODO: current normalization of numNulls by ratio will be refined in the future + Assertions.assertEquals(1448.59995, ownerGroupOr.getStatistics().getRowCount(), 0.001); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java deleted file mode 100644 index 68279489bfc1e5..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ColocatePlanTest.java +++ /dev/null @@ -1,309 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
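[Editorial note, illustrative sketch only, not part of the patch] The statistics-related test hunks above all apply one mechanical migration: the row count is no longer supplied through ColumnStatisticBuilder.setCount(), it is passed to the ColumnStatisticBuilder constructor instead, while the remaining fluent setters stay unchanged. A minimal before/after sketch of the pattern, assuming the builder and result type live in the usual org.apache.doris.statistics package:

    import org.apache.doris.statistics.ColumnStatistic;
    import org.apache.doris.statistics.ColumnStatisticBuilder;

    // Old style, removed throughout this patch: row count set via a fluent setter.
    ColumnStatistic oldStyle = new ColumnStatisticBuilder()
            .setCount(100)
            .setNdv(10)
            .build();

    // New style, used throughout this patch: row count passed to the constructor.
    ColumnStatistic newStyle = new ColumnStatisticBuilder(100)
            .setNdv(10)
            .build();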
- -package org.apache.doris.planner; - -import org.apache.doris.catalog.ColocateGroupSchema; -import org.apache.doris.catalog.ColocateTableIndex; -import org.apache.doris.catalog.ColocateTableIndex.GroupId; -import org.apache.doris.catalog.Database; -import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.OlapTable; -import org.apache.doris.common.FeConstants; -import org.apache.doris.common.jmockit.Deencapsulation; -import org.apache.doris.qe.Coordinator; -import org.apache.doris.qe.QueryStatisticsItem; -import org.apache.doris.qe.StmtExecutor; -import org.apache.doris.utframe.TestWithFeService; - -import org.apache.commons.lang3.StringUtils; -import org.junit.Assert; -import org.junit.jupiter.api.Test; - -import java.util.List; - -public class ColocatePlanTest extends TestWithFeService { - public static final String COLOCATE_ENABLE = "COLOCATE"; - private static final String GLOBAL_GROUP = "__global__group1"; - private static final String GLOBAL_GROUP2 = "__global__group2"; - - @Override - protected void runBeforeAll() throws Exception { - FeConstants.runningUnitTest = true; - createDatabase("db1"); - createTable("create table db1.test_colocate(k1 int, k2 int, k3 int, k4 int) " - + "distributed by hash(k1, k2) buckets 10 properties('replication_num' = '2'," - + "'colocate_with' = 'group1');"); - createTable("create table db1.test(k1 int, k2 int, k3 int, k4 int)" - + "partition by range(k1) (partition p1 values less than (\"1\"), partition p2 values less than (\"2\"))" - + "distributed by hash(k1, k2) buckets 10 properties('replication_num' = '2')"); - createTable("create table db1.test_multi_partition(k1 int, k2 int)" - + "partition by range(k1) (partition p1 values less than(\"1\"), partition p2 values less than (\"2\"))" - + "distributed by hash(k2) buckets 10 properties ('replication_num' = '2', 'colocate_with' = 'group2')"); - - // global colocate tables - createDatabase("db2"); - createTable("create table db1.test_global_colocate1(k1 varchar(10), k2 int, k3 int, k4 int) " - + "distributed by hash(k1, k2) buckets 10 properties('replication_num' = '2'," - + "'colocate_with' = '" + GLOBAL_GROUP + "');"); - createTable("create table db2.test_global_colocate2(k1 varchar(20), k2 int, k3 int) " - + "distributed by hash(k1, k2) buckets 10 properties('replication_num' = '2'," - + "'colocate_with' = '" + GLOBAL_GROUP + "');"); - createTable("create table db2.test_global_colocate3(k1 varchar(20), k2 int, k3 date) " - + "partition by range(k3) (partition p1 values less than(\"2020-01-01\"), partition p2 values less than (\"2020-02-01\")) " - + "distributed by hash(k1, k2) buckets 10 properties('replication_num' = '2'," - + "'colocate_with' = '" + GLOBAL_GROUP + "');"); - } - - @Override - protected int backendNum() { - return 2; - } - - // without - // 1. agg: group by column < distributed columns - // 2. 
join: src data has been redistributed - @Test - public void sqlDistributedSmallerThanData1() throws Exception { - String plan1 = getSQLPlanOrErrorMsg( - "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from (select k1 from db1.test_colocate group by k1) a , db1.test_colocate b " - + "where a.k1=b.k1"); - Assert.assertEquals(2, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertTrue(plan1.contains(DistributedPlanColocateRule.REDISTRIBUTED_SRC_DATA)); - } - - // without : join column < distributed columns; - @Test - public void sqlDistributedSmallerThanData2() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from (select k1 from db1.test_colocate group by k1, k2) a , db1.test_colocate b " - + "where a.k1=b.k1"; - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertTrue(plan1.contains(DistributedPlanColocateRule.INCONSISTENT_DISTRIBUTION_OF_TABLE_AND_QUERY)); - } - - // with: - // 1. agg columns = distributed columns - // 2. hash columns = agg output columns = distributed columns - @Test - public void sqlAggAndJoinSameAsTableMeta() throws Exception { - String sql = - "explain select * from (select k1, k2 from db1.test_colocate group by k1, k2) a , db1.test_colocate b " - + "where a.k1=b.k1 and a.k2=b.k2"; - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(1, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertTrue(plan1.contains(COLOCATE_ENABLE)); - } - - // with: - // 1. agg columns > distributed columns - // 2. hash columns = agg output columns > distributed columns - @Test - public void sqlAggAndJoinMoreThanTableMeta() throws Exception { - String sql = "explain select * from (select k1, k2, k3 from db1.test_colocate group by k1, k2, k3) a , " - + "db1.test_colocate b where a.k1=b.k1 and a.k2=b.k2 and a.k3=b.k3"; - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(1, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertTrue(plan1.contains(COLOCATE_ENABLE)); - } - - // with: - // 1. agg columns > distributed columns - // 2. hash columns = distributed columns - @Test - public void sqlAggMoreThanTableMeta() throws Exception { - String sql = "explain select * from (select k1, k2, k3 from db1.test_colocate group by k1, k2, k3) a , " - + "db1.test_colocate b where a.k1=b.k1 and a.k2=b.k2"; - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(1, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertTrue(plan1.contains(COLOCATE_ENABLE)); - } - - // without: - // 1. agg columns = distributed columns - // 2. table is not in colocate group - // 3. more then 1 instances - // Fixed #6028 - @Test - public void sqlAggWithNonColocateTable() throws Exception { - String sql = "explain select k1, k2 from db1.test group by k1, k2"; - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(2, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertFalse(plan1.contains(COLOCATE_ENABLE)); - } - - // check colocate add scan range - // Fix #6726 - // 1. colocate agg node - // 2. 
scan node with two tablet one instance - @Test - public void sqlAggWithColocateTable() throws Exception { - connectContext.getSessionVariable().setParallelResultSink(false); - String sql = "select k1, k2, count(*) from db1.test_multi_partition where k2 = 1 group by k1, k2"; - StmtExecutor executor = getSqlStmtExecutor(sql); - Planner planner = executor.planner(); - Coordinator coordinator = Deencapsulation.getField(executor, "coord"); - List scanNodeList = planner.getScanNodes(); - Assert.assertEquals(scanNodeList.size(), 1); - Assert.assertTrue(scanNodeList.get(0) instanceof OlapScanNode); - OlapScanNode olapScanNode = (OlapScanNode) scanNodeList.get(0); - Assert.assertEquals(olapScanNode.getSelectedPartitionIds().size(), 2); - long selectedTablet = Deencapsulation.getField(olapScanNode, "selectedSplitNum"); - Assert.assertEquals(selectedTablet, 2); - - List instanceInfo = coordinator.getFragmentInstanceInfos(); - Assert.assertEquals(instanceInfo.size(), 2); - } - - @Test - public void checkColocatePlanFragment() throws Exception { - connectContext.getSessionVariable().setEnableSharedScan(false); - String sql - = "select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.test_colocate a, db1.test_colocate b where a.k1=b.k1 and a.k2=b.k2 group by a.k1;"; - StmtExecutor executor = getSqlStmtExecutor(sql); - Planner planner = executor.planner(); - Coordinator coordinator = Deencapsulation.getField(executor, "coord"); - boolean isColocateFragment0 = Deencapsulation.invoke(coordinator, "isColocateFragment", - planner.getFragments().get(1), planner.getFragments().get(1).getPlanRoot()); - Assert.assertFalse(isColocateFragment0); - boolean isColocateFragment1 = Deencapsulation.invoke(coordinator, "isColocateFragment", - planner.getFragments().get(2), planner.getFragments().get(2).getPlanRoot()); - Assert.assertTrue(isColocateFragment1); - } - - // Fix #8778 - @Test - public void rollupAndMoreThanOneInstanceWithoutColocate() throws Exception { - connectContext.getSessionVariable().setParallelResultSink(false); - String createColocateTblStmtStr = "create table db1.test_colocate_one_backend(k1 int, k2 int, k3 int, k4 int) " - + "distributed by hash(k1, k2, k3) buckets 10 properties('replication_num' = '1');"; - createTable(createColocateTblStmtStr); - String sql = "select a.k1, a.k2, sum(a.k3) " - + "from db1.test_colocate_one_backend a join[shuffle] db1.test_colocate_one_backend b on a.k1=b.k1 " - + "group by rollup(a.k1, a.k2);"; - Deencapsulation.setField(connectContext.getSessionVariable(), "parallelExecInstanceNum", 2); - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(2, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertEquals(5, StringUtils.countMatches(plan1, "PLAN FRAGMENT")); - } - - @Test - public void testGlobalColocateGroup() throws Exception { - Database db1 = Env.getCurrentEnv().getInternalCatalog().getDbNullable("db1"); - Database db2 = Env.getCurrentEnv().getInternalCatalog().getDbNullable("db2"); - OlapTable tbl1 = (OlapTable) db1.getTableNullable("test_global_colocate1"); - OlapTable tbl2 = (OlapTable) db2.getTableNullable("test_global_colocate2"); - OlapTable tbl3 = (OlapTable) db2.getTableNullable("test_global_colocate3"); - - String sql = "explain select * from (select k1, k2 from " - + "db1.test_global_colocate1 group by k1, k2) a , db2.test_global_colocate2 b " - + "where a.k1=b.k1 and a.k2=b.k2"; - String plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(1, StringUtils.countMatches(plan1, "AGGREGATE")); - 
Assert.assertTrue(plan1.contains(COLOCATE_ENABLE)); - ColocateTableIndex colocateTableIndex = Env.getCurrentColocateIndex(); - ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema( - GroupId.getFullGroupName(1000, GLOBAL_GROUP)); - Assert.assertNotNull(groupSchema); - GroupId groupId = groupSchema.getGroupId(); - List tableIds = colocateTableIndex.getAllTableIds(groupId); - Assert.assertEquals(3, tableIds.size()); - Assert.assertTrue(tableIds.contains(tbl1.getId())); - Assert.assertTrue(tableIds.contains(tbl2.getId())); - Assert.assertTrue(tableIds.contains(tbl3.getId())); - Assert.assertEquals(3, groupId.getTblId2DbIdSize()); - Assert.assertEquals(db1.getId(), groupId.getDbIdByTblId(tbl1.getId())); - Assert.assertEquals(db2.getId(), groupId.getDbIdByTblId(tbl2.getId())); - Assert.assertEquals(db2.getId(), groupId.getDbIdByTblId(tbl3.getId())); - - sql = "explain select * from (select k1, k2 from " - + "db1.test_global_colocate1 group by k1, k2) a , db2.test_global_colocate3 b " - + "where a.k1=b.k1 and a.k2=b.k2"; - plan1 = getSQLPlanOrErrorMsg(sql); - Assert.assertEquals(1, StringUtils.countMatches(plan1, "AGGREGATE")); - Assert.assertTrue(plan1.contains(COLOCATE_ENABLE)); - - String addPartitionStmt - = "alter table db2.test_global_colocate3 add partition p3 values less than (\"2020-03-01\");"; - alterTableSync(addPartitionStmt); - - try { - createTable("create table db1.test_global_colocate4(k1 int, k2 int, k3 int, k4 int) " - + "distributed by hash(k1, k2) buckets 10 properties('replication_num' = '2'," - + "'colocate_with' = '" + GLOBAL_GROUP + "');"); - Assert.fail(); - } catch (Exception e) { - e.printStackTrace(); - Assert.assertTrue( - e.getMessage().contains("Colocate tables distribution columns must have the same data type")); - List tmpTableIds = colocateTableIndex.getAllTableIds(groupId); - Assert.assertEquals(3, tmpTableIds.size()); - Assert.assertTrue(tmpTableIds.contains(tbl1.getId())); - Assert.assertTrue(tmpTableIds.contains(tbl2.getId())); - Assert.assertTrue(tmpTableIds.contains(tbl3.getId())); - Assert.assertEquals(3, groupId.getTblId2DbIdSize()); - Assert.assertEquals(db1.getId(), groupId.getDbIdByTblId(tbl1.getId())); - Assert.assertEquals(db2.getId(), groupId.getDbIdByTblId(tbl2.getId())); - Assert.assertEquals(db2.getId(), groupId.getDbIdByTblId(tbl3.getId())); - } - - // modify table's colocate group - String modifyStmt = "alter table db2.test_global_colocate3 set ('colocate_with' = '');"; - alterTableSync(modifyStmt); - tableIds = colocateTableIndex.getAllTableIds(groupId); - Assert.assertEquals(2, tableIds.size()); - Assert.assertTrue(tableIds.contains(tbl1.getId())); - Assert.assertTrue(tableIds.contains(tbl2.getId())); - Assert.assertEquals(2, groupId.getTblId2DbIdSize()); - Assert.assertEquals(db1.getId(), groupId.getDbIdByTblId(tbl1.getId())); - Assert.assertEquals(db2.getId(), groupId.getDbIdByTblId(tbl2.getId())); - - // change table's colocate group - modifyStmt = "alter table db2.test_global_colocate2 set ('colocate_with' = '" + GLOBAL_GROUP2 + "');"; - alterTableSync(modifyStmt); - tableIds = colocateTableIndex.getAllTableIds(groupId); - Assert.assertEquals(1, tableIds.size()); - Assert.assertTrue(tableIds.contains(tbl1.getId())); - Assert.assertEquals(1, groupId.getTblId2DbIdSize()); - Assert.assertEquals(db1.getId(), groupId.getDbIdByTblId(tbl1.getId())); - - GroupId groupId2 = colocateTableIndex.getGroupSchema( - GroupId.getFullGroupName(1000, GLOBAL_GROUP2)).getGroupId(); - tableIds = colocateTableIndex.getAllTableIds(groupId2); - 
Assert.assertEquals(1, tableIds.size()); - Assert.assertTrue(tableIds.contains(tbl2.getId())); - Assert.assertEquals(1, groupId2.getTblId2DbIdSize()); - Assert.assertEquals(db2.getId(), groupId2.getDbIdByTblId(tbl2.getId())); - - // checkpoint - // Get currentCatalog first - Env currentEnv = Env.getCurrentEnv(); - // Save real ckptThreadId - long ckptThreadId = currentEnv.getCheckpointer().getId(); - try { - // set checkpointThreadId to current thread id, so that when do checkpoint manually here, - // the Catalog.isCheckpointThread() will return true. - Deencapsulation.setField(Env.class, "checkpointThreadId", Thread.currentThread().getId()); - currentEnv.getCheckpointer().doCheckpoint(); - } catch (Throwable e) { - e.printStackTrace(); - Assert.fail(e.getMessage()); - } finally { - // Restore the ckptThreadId - Deencapsulation.setField(Env.class, "checkpointThreadId", ckptThreadId); - } - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/DistributedPlannerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/DistributedPlannerTest.java deleted file mode 100644 index 6f3a66ab3bab92..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/DistributedPlannerTest.java +++ /dev/null @@ -1,171 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.planner; - -import org.apache.doris.analysis.CreateDbStmt; -import org.apache.doris.analysis.CreateTableStmt; -import org.apache.doris.analysis.ExplainOptions; -import org.apache.doris.analysis.TupleId; -import org.apache.doris.catalog.Env; -import org.apache.doris.common.jmockit.Deencapsulation; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.StmtExecutor; -import org.apache.doris.utframe.UtFrameUtils; - -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import mockit.Expectations; -import mockit.Injectable; -import mockit.Mocked; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.util.List; -import java.util.Set; -import java.util.UUID; - -public class DistributedPlannerTest { - private static String runningDir = "fe/mocked/DemoTest/" + UUID.randomUUID().toString() + "/"; - private static ConnectContext ctx; - - @BeforeClass - public static void setUp() throws Exception { - UtFrameUtils.createDorisCluster(runningDir); - ctx = UtFrameUtils.createDefaultCtx(); - ctx.getSessionVariable().setEnableNereidsPlanner(false); - String createDbStmtStr = "create database db1;"; - CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, ctx); - Env.getCurrentEnv().createDb(createDbStmt); - // create table tbl1 - String createTblStmtStr = "create table db1.tbl1(k1 int, k2 varchar(32), v bigint sum) " - + "AGGREGATE KEY(k1,k2) distributed by hash(k1) buckets 1 properties('replication_num' = '1');"; - CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); - Env.getCurrentEnv().createTable(createTableStmt); - // create table tbl2 - createTblStmtStr = "create table db1.tbl2(k3 int, k4 varchar(32)) " - + "DUPLICATE KEY(k3) distributed by hash(k3) buckets 1 properties('replication_num' = '1');"; - createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); - Env.getCurrentEnv().createTable(createTableStmt); - } - - @After - public void tearDown() throws Exception { - FileUtils.deleteDirectory(new File(runningDir)); - } - - @Test - public void testAssertFragmentWithDistributedInput(@Injectable AssertNumRowsNode assertNumRowsNode, - @Injectable PlanFragment inputFragment, - @Injectable PlanNodeId planNodeId, - @Injectable PlanFragmentId planFragmentId, - @Injectable PlanNode inputPlanRoot, - @Injectable TupleId tupleId, - @Mocked PlannerContext plannerContext) { - DistributedPlanner distributedPlanner = new DistributedPlanner(plannerContext); - - List tupleIdList = Lists.newArrayList(tupleId); - Set tupleIdSet = Sets.newHashSet(tupleId); - Deencapsulation.setField(inputPlanRoot, "tupleIds", tupleIdList); - Deencapsulation.setField(inputPlanRoot, "tblRefIds", tupleIdList); - Deencapsulation.setField(inputPlanRoot, "nullableTupleIds", Sets.newHashSet(tupleId)); - Deencapsulation.setField(inputPlanRoot, "conjuncts", Lists.newArrayList()); - new Expectations() { - { - inputPlanRoot.getOutputTupleDesc(); - result = null; - inputFragment.isPartitioned(); - result = true; - plannerContext.getNextNodeId(); - result = planNodeId; - plannerContext.getNextFragmentId(); - result = planFragmentId; - inputFragment.getPlanRoot(); - result = inputPlanRoot; - inputPlanRoot.getTupleIds(); - result = tupleIdList; - inputPlanRoot.getTblRefIds(); - 
result = tupleIdList; - inputPlanRoot.getNullableTupleIds(); - result = tupleIdSet; - assertNumRowsNode.getChildren(); - result = inputPlanRoot; - } - }; - - PlanFragment assertFragment = Deencapsulation.invoke(distributedPlanner, "createAssertFragment", - assertNumRowsNode, inputFragment); - Assert.assertFalse(assertFragment.isPartitioned()); - Assert.assertSame(assertNumRowsNode, assertFragment.getPlanRoot()); - } - - @Test - public void testAssertFragmentWithUnpartitionInput(@Injectable AssertNumRowsNode assertNumRowsNode, - @Injectable PlanFragment inputFragment, - @Mocked PlannerContext plannerContext) { - DistributedPlanner distributedPlanner = new DistributedPlanner(plannerContext); - - PlanFragment assertFragment = Deencapsulation.invoke(distributedPlanner, "createAssertFragment", - assertNumRowsNode, inputFragment); - Assert.assertSame(assertFragment, inputFragment); - Assert.assertTrue(assertFragment.getPlanRoot() instanceof AssertNumRowsNode); - } - - @Test - public void testExplicitlyBroadcastJoin() throws Exception { - String sql = "explain select * from db1.tbl1 join [BROADCAST] db1.tbl2 on tbl1.k1 = tbl2.k3"; - StmtExecutor stmtExecutor = new StmtExecutor(ctx, sql); - stmtExecutor.execute(); - Planner planner = stmtExecutor.planner(); - String plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assert.assertEquals(1, StringUtils.countMatches(plan, "INNER JOIN(BROADCAST)")); - - sql = "explain select * from db1.tbl1 join [SHUFFLE] db1.tbl2 on tbl1.k1 = tbl2.k3"; - stmtExecutor = new StmtExecutor(ctx, sql); - stmtExecutor.execute(); - planner = stmtExecutor.planner(); - plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assert.assertEquals(1, StringUtils.countMatches(plan, "INNER JOIN(PARTITIONED)")); - } - - @Test - public void testBroadcastJoinCostThreshold() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 join db1.tbl2 on tbl1.k1 = tbl2.k3"; - StmtExecutor stmtExecutor = new StmtExecutor(ctx, sql); - stmtExecutor.execute(); - Planner planner = stmtExecutor.planner(); - String plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assert.assertEquals(1, StringUtils.countMatches(plan, "INNER JOIN(BROADCAST)")); - - double originThreshold = ctx.getSessionVariable().autoBroadcastJoinThreshold; - try { - ctx.getSessionVariable().autoBroadcastJoinThreshold = -1.0; - stmtExecutor = new StmtExecutor(ctx, sql); - stmtExecutor.execute(); - planner = stmtExecutor.planner(); - plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assert.assertEquals(1, StringUtils.countMatches(plan, "INNER JOIN(PARTITIONED)")); - } finally { - ctx.getSessionVariable().autoBroadcastJoinThreshold = originThreshold; - } - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java deleted file mode 100644 index 573a19bd9dbd96..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/PlannerTest.java +++ /dev/null @@ -1,734 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.planner; - -import org.apache.doris.analysis.ExplainOptions; -import org.apache.doris.analysis.Expr; -import org.apache.doris.analysis.UserIdentity; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.FeConstants; -import org.apache.doris.qe.QueryState; -import org.apache.doris.qe.QueryState.MysqlStateType; -import org.apache.doris.qe.StmtExecutor; -import org.apache.doris.utframe.TestWithFeService; - -import org.apache.commons.lang3.StringUtils; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.util.List; - -public class PlannerTest extends TestWithFeService { - - @Override - protected void runBeforeAll() throws Exception { - - connectContext.getSessionVariable().setEnableNereidsPlanner(false); - - // Create database `db1`. - createDatabase("db1"); - - // Create tables. - String tbl1 = "create table db1.tbl1(" + "k1 varchar(32), " + "k2 varchar(32), " + "k3 varchar(32), " - + "k4 int) " + "AGGREGATE KEY(k1, k2,k3,k4) " + "distributed by hash(k1) buckets 3 " - + "properties('replication_num' = '1');"; - - String tbl2 = "create table db1.tbl2(" + "k1 int, " + "k2 int sum) " + "AGGREGATE KEY(k1) " - + "partition by range(k1) () " + "distributed by hash(k1) buckets 3 " - + "properties('replication_num' = '1');"; - - String tbl3 = "create table db1.tbl3 (" + "k1 date, " + "k2 varchar(128) NULL, " + "k3 varchar(5000) NULL) " - + "DUPLICATE KEY(k1, k2, k3) " + "distributed by hash(k1) buckets 1 " - + "properties ('replication_num' = '1');"; - - String tbl4 = "create table db1.tbl4(" + "k1 int," + " k2 int," + " v1 int)" + " distributed by hash(k1)" - + " properties('replication_num' = '1');"; - - String tbl5 = "create table db1.tbl5(" + "k1 int," + "k2 int) " + "DISTRIBUTED BY HASH(k2) " - + "BUCKETS 3 PROPERTIES ('replication_num' = '1');"; - - String tbl6 = "create table db1.tbl6(" + "k1 int," + "k2 int, " + "v1 int)" + "UNIQUE KEY (k1, k2)" - + "DISTRIBUTED BY HASH(k2) " + "BUCKETS 3 PROPERTIES ('replication_num' = '1');"; - - createTables(tbl1, tbl2, tbl3, tbl4, tbl5, tbl6); - } - - @Test - public void testSetOperation() throws Exception { - // union - String sql1 = "explain select * from\n" - + " (select k1, k2 from db1.tbl1\n" - + " union all\n" - + " select k1, k2 from db1.tbl1) a\n" - + " inner join\n" - + " db1.tbl1 b\n" - + " on (a.k1 = b.k1)\n" - + "where b.k1 = 'a'"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(1, StringUtils.countMatches(plan1, "UNION")); - String sql2 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 where k1='a' and k4=1\n" - + "union distinct\n" - + " (select * from db1.tbl1 where k1='b' and k4=2\n" - + " union all\n" - + " select * from db1.tbl1 where k1='b' and k4=2)\n" - + "union distinct\n" - + " (select * from db1.tbl1 where k1='b' and k4=2\n" - + " union all\n" - + " (select * from db1.tbl1 where k1='b' 
and k4=3)\n" - + " order by 3 limit 3)\n" - + "union all\n" - + " (select * from db1.tbl1 where k1='b' and k4=3\n" - + " union all\n" - + " select * from db1.tbl1 where k1='b' and k4=4)\n" - + "union all\n" - + " (select * from db1.tbl1 where k1='b' and k4=3\n" - + " union all\n" - + " (select * from db1.tbl1 where k1='b' and k4=5)\n" - + " order by 3 limit 3)"; - StmtExecutor stmtExecutor2 = new StmtExecutor(connectContext, sql2); - stmtExecutor2.execute(); - Planner planner2 = stmtExecutor2.planner(); - String plan2 = planner2.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(4, StringUtils.countMatches(plan2, "UNION")); - - // intersect - String sql3 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from\n" - + " (select k1, k2 from db1.tbl1\n" - + " intersect\n" - + " select k1, k2 from db1.tbl1) a\n" - + " inner join\n" - + " db1.tbl1 b\n" - + " on (a.k1 = b.k1)\n" - + "where b.k1 = 'a'"; - StmtExecutor stmtExecutor3 = new StmtExecutor(connectContext, sql3); - stmtExecutor3.execute(); - Planner planner3 = stmtExecutor3.planner(); - String plan3 = planner3.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(1, StringUtils.countMatches(plan3, "INTERSECT")); - String sql4 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 where k1='a' and k4=1\n" - + "intersect distinct\n" - + " (select * from db1.tbl1 where k1='b' and k4=2\n" - + " intersect\n" - + " select * from db1.tbl1 where k1='b' and k4=2)\n" - + "intersect distinct\n" - + " (select * from db1.tbl1 where k1='b' and k4=2\n" - + " intersect\n" - + " (select * from db1.tbl1 where k1='b' and k4=3)\n" - + " order by 3 limit 3)\n" - + "intersect\n" - + " (select * from db1.tbl1 where k1='b' and k4=3\n" - + " intersect\n" - + " select * from db1.tbl1 where k1='b' and k4=4)\n" - + "intersect\n" - + " (select * from db1.tbl1 where k1='b' and k4=3\n" - + " intersect\n" - + " (select * from db1.tbl1 where k1='b' and k4=5)\n" - + " order by 3 limit 3)"; - - StmtExecutor stmtExecutor4 = new StmtExecutor(connectContext, sql4); - stmtExecutor4.execute(); - Planner planner4 = stmtExecutor4.planner(); - String plan4 = planner4.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(3, StringUtils.countMatches(plan4, "INTERSECT")); - - // except - String sql5 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from\n" - + " (select k1, k2 from db1.tbl1\n" - + " except\n" - + " select k1, k2 from db1.tbl1) a\n" - + " inner join\n" - + " db1.tbl1 b\n" - + " on (a.k1 = b.k1)\n" - + "where b.k1 = 'a'"; - StmtExecutor stmtExecutor5 = new StmtExecutor(connectContext, sql5); - stmtExecutor5.execute(); - Planner planner5 = stmtExecutor5.planner(); - String plan5 = planner5.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(1, StringUtils.countMatches(plan5, "EXCEPT")); - - String sql6 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 where k1='a' and k4=1\n" - + "except\n" - + "select * from db1.tbl1 where k1='a' and k4=1\n" - + "except\n" - + "select * from db1.tbl1 where k1='a' and k4=2\n" - + "except distinct\n" - + "(select * from db1.tbl1 where k1='a' and k4=2)\n" - + "order by 3 limit 3"; - StmtExecutor stmtExecutor6 = new StmtExecutor(connectContext, sql6); - stmtExecutor6.execute(); - Planner planner6 = stmtExecutor6.planner(); - String plan6 = planner6.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(1, 
StringUtils.countMatches(plan6, "EXCEPT")); - - String sql7 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 where k1='a' and k4=1\n" - + "except distinct\n" - + "select * from db1.tbl1 where k1='a' and k4=1\n" - + "except\n" - + "select * from db1.tbl1 where k1='a' and k4=2\n" - + "except\n" - + "(select * from db1.tbl1 where k1='a' and k4=2)\n" - + "order by 3 limit 3"; - StmtExecutor stmtExecutor7 = new StmtExecutor(connectContext, sql7); - stmtExecutor7.execute(); - Planner planner7 = stmtExecutor7.planner(); - String plan7 = planner7.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(1, StringUtils.countMatches(plan7, "EXCEPT")); - - // mixed - String sql8 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 where k1='a' and k4=1\n" - + "union\n" - + "select * from db1.tbl1 where k1='a' and k4=1\n" - + "except\n" - + "select * from db1.tbl1 where k1='a' and k4=2\n" - + "intersect\n" - + "(select * from db1.tbl1 where k1='a' and k4=2)\n" - + "order by 3 limit 3"; - StmtExecutor stmtExecutor8 = new StmtExecutor(connectContext, sql8); - stmtExecutor8.execute(); - Planner planner8 = stmtExecutor8.planner(); - String plan8 = planner8.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(1, StringUtils.countMatches(plan8, "UNION")); - Assertions.assertEquals(1, StringUtils.countMatches(plan8, "INTERSECT")); - Assertions.assertEquals(1, StringUtils.countMatches(plan8, "EXCEPT")); - - String sql9 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1 where k1='a' and k4=1\n" - + "intersect distinct\n" - + " (select * from db1.tbl1 where k1='b' and k4=2\n" - + " union all\n" - + " select * from db1.tbl1 where k1='b' and k4=2)\n" - + "intersect distinct\n" - + " (select * from db1.tbl1 where k1='b' and k4=2\n" - + " except\n" - + " (select * from db1.tbl1 where k1='b' and k4=3)\n" - + " order by 3 limit 3)\n" - + "union all\n" - + " (select * from db1.tbl1 where k1='b' and k4=3\n" - + " intersect\n" - + " select * from db1.tbl1 where k1='b' and k4=4)\n" - + "except\n" - + " (select * from db1.tbl1 where k1='b' and k4=3\n" - + " intersect\n" - + " (select * from db1.tbl1 where k1='b' and k4=5)\n" - + " order by 3 limit 3)"; - - StmtExecutor stmtExecutor9 = new StmtExecutor(connectContext, sql9); - stmtExecutor9.execute(); - Planner planner9 = stmtExecutor9.planner(); - String plan9 = planner9.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertEquals(2, StringUtils.countMatches(plan9, "UNION")); - Assertions.assertEquals(3, StringUtils.countMatches(plan9, "INTERSECT")); - Assertions.assertEquals(2, StringUtils.countMatches(plan9, "EXCEPT")); - - String sql10 = "select /*+ SET_VAR(enable_nereids_planner=false) */ 499 union select 670 except select 499"; - StmtExecutor stmtExecutor10 = new StmtExecutor(connectContext, sql10); - stmtExecutor10.execute(); - Planner planner10 = stmtExecutor10.planner(); - List fragments10 = planner10.getFragments(); - Assertions.assertTrue(fragments10.get(0).getPlanRoot().getFragment() - .getPlanRoot().getChild(0) instanceof AggregationNode); - Assertions.assertTrue(fragments10.get(0).getPlanRoot() - .getFragment().getPlanRoot().getChild(1) instanceof UnionNode); - - String sql11 = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.x FROM\n" - + "(SELECT '01' x) a \n" - + "INNER JOIN\n" - + "(SELECT '01' x UNION all SELECT '02') b"; - StmtExecutor stmtExecutor11 = new StmtExecutor(connectContext, sql11); - 
stmtExecutor11.execute(); - Planner planner11 = stmtExecutor11.planner(); - SetOperationNode setNode11 = (SetOperationNode) (planner11.getFragments().get(1).getPlanRoot()); - Assertions.assertEquals(2, setNode11.getMaterializedConstExprLists().size()); - - String sql12 = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.x \n" - + "FROM (SELECT '01' x) a \n" - + "INNER JOIN \n" - + "(SELECT k1 from db1.tbl1 \n" - + "UNION all \n" - + "SELECT k1 from db1.tbl1) b;"; - StmtExecutor stmtExecutor12 = new StmtExecutor(connectContext, sql12); - stmtExecutor12.execute(); - Planner planner12 = stmtExecutor12.planner(); - SetOperationNode setNode12 = (SetOperationNode) (planner12.getFragments().get(1).getPlanRoot()); - Assertions.assertEquals(2, setNode12.getMaterializedResultExprLists().size()); - } - - @Test - public void testPushDown() throws Exception { - String sql1 = - "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ \n" - + " IF(k2 IS NULL, 'ALL', k2) AS k2,\n" - + " IF(k3 IS NULL, 'ALL', k3) AS k3,\n" - + " k4\n" - + "FROM\n" - + "(\n" - + " SELECT\n" - + " k1,\n" - + " k2,\n" - + " k3,\n" - + " SUM(k4) AS k4\n" - + " FROM db1.tbl1\n" - + " WHERE k1 = 0\n" - + " AND k4 = 1\n" - + " AND k3 = 'foo'\n" - + " GROUP BY \n" - + " GROUPING SETS (\n" - + " (k1),\n" - + " (k1, k2),\n" - + " (k1, k3),\n" - + " (k1, k2, k3)\n" - + " )\n" - + ") t\n" - + "WHERE IF(k2 IS NULL, 'ALL', k2) = 'ALL'"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - List fragments1 = planner1.getFragments(); - Assertions.assertEquals("if", - fragments1.get(0).getPlanRoot().conjuncts.get(0).getChild(0).getFn().functionName()); - Assertions.assertEquals(3, fragments1.get(0).getPlanRoot().getChild(0).getChild(0).conjuncts.size()); - - String sql2 = - "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ \n" - + " IF(k2 IS NULL, 'ALL', k2) AS k2,\n" - + " IF(k3 IS NULL, 'ALL', k3) AS k3,\n" - + " k4\n" - + "FROM\n" - + "(\n" - + " SELECT\n" - + " k1,\n" - + " k2,\n" - + " k3,\n" - + " SUM(k4) AS k4\n" - + " FROM db1.tbl1\n" - + " WHERE k1 = 0\n" - + " AND k4 = 1\n" - + " AND k3 = 'foo'\n" - + " GROUP BY k1, k2, k3\n" - + ") t\n" - + "WHERE IF(k2 IS NULL, 'ALL', k2) = 'ALL'"; - StmtExecutor stmtExecutor2 = new StmtExecutor(connectContext, sql2); - stmtExecutor2.execute(); - Planner planner2 = stmtExecutor2.planner(); - List fragments2 = planner2.getFragments(); - Assertions.assertEquals(4, fragments2.get(0).getPlanRoot().getChild(0).conjuncts.size()); - - } - - @Test - public void testWithStmtSlotIsAllowNull() throws Exception { - // union - String sql1 = "with a as (select NULL as user_id ), " - + "b as ( select '543' as user_id) " - + "select /*+ SET_VAR(enable_nereids_planner=false) */ user_id from a union all select user_id from b"; - - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(true, false, false)); - Assertions.assertEquals(2, StringUtils.countMatches(plan1, "nullable=true")); - } - - @Test - public void testAccessingVisibleColumnWithoutPartition() throws Exception { - String sql = "select count(k1) from db1.tbl2"; - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); - stmtExecutor.execute(); - Assertions.assertNotNull(stmtExecutor.planner()); - } - - @Test - public void testAnalyticSortNodeLeftJoin() throws Exception { - String sql = "SELECT /*+ 
SET_VAR(enable_nereids_planner=false) */ a.k1, a.k3, SUM(COUNT(t.k2)) OVER (PARTITION BY a.k3 ORDER BY a.k1) AS c\n" - + "FROM ( SELECT k1, k3 FROM db1.tbl3) a\n" - + "LEFT JOIN (SELECT 1 AS line, k1, k2, k3 FROM db1.tbl3) t\n" - + "ON t.k1 = a.k1 AND t.k3 = a.k3\n" - + "GROUP BY a.k1, a.k3"; - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); - stmtExecutor.execute(); - Assertions.assertNotNull(stmtExecutor.planner()); - Planner planner = stmtExecutor.planner(); - List fragments = planner.getFragments(); - Assertions.assertTrue(fragments.size() > 0); - PlanNode node = fragments.get(0).getPlanRoot().getChild(0); - Assertions.assertTrue(node.getChildren().size() > 0); - Assertions.assertTrue(node instanceof SortNode); - SortNode sortNode = (SortNode) node; - List tupleExprs = sortNode.resolvedTupleExprs; - List sortTupleExprs = sortNode.getSortInfo().getSortTupleSlotExprs(); - for (Expr expr : tupleExprs) { - expr.isBoundByTupleIds(sortNode.getChild(0).tupleIds); - } - for (Expr expr : sortTupleExprs) { - expr.isBoundByTupleIds(sortNode.getChild(0).tupleIds); - } - } - - - @Test - public void testBigintSlotRefCompareDecimalLiteral() { - java.util.function.BiConsumer compare = (sql1, sql2) -> { - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - try { - stmtExecutor1.execute(); - } catch (Exception e) { - e.printStackTrace(); - } - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - - StmtExecutor stmtExecutor2 = new StmtExecutor(connectContext, sql2); - try { - stmtExecutor2.execute(); - } catch (Exception e) { - e.printStackTrace(); - } - Planner planner2 = stmtExecutor2.planner(); - String plan2 = planner2.getExplainString(new ExplainOptions(false, false, false)); - - Assertions.assertEquals(plan1, plan2); - }; - - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl2 where k1 = 2.0", "select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl2 where k1 = 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl2 where k1 = 2.1", "select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl2 where 2 = 2.1"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 != 2.0", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 != 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 != 2.1", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where TRUE"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 <= 2.0", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 <= 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 <= 2.1", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 <= 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 >= 2.0", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 >= 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 >= 2.1", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 > 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 < 2.0", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 < 2"); - 
compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 < 2.1", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 <= 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 > 2.0", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 > 2"); - compare.accept("select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 > 2.1", "select /*+ SET_VAR(enable_nereids_planner=false) */* from db1.tbl2 where k1 > 2"); - } - - @Test - public void testStringType() { - String createTbl1 = "create table db1.tbl1(k1 string, k2 varchar(32), k3 varchar(32), k4 int) " - + "AGGREGATE KEY(k1, k2,k3,k4) distributed by hash(k1) buckets 3 properties('replication_num' = '1')"; - AnalysisException exception = Assertions.assertThrows( - AnalysisException.class, () -> parseAndAnalyzeStmt(createTbl1)); - Assertions.assertTrue(exception.getMessage().contains("String Type should not be used in key column[k1].")); - } - - @Test - public void testPushDownPredicateOnGroupingSetAggregate() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1, k2, count(distinct v1) from db1.tbl4" - + " group by grouping sets((k1), (k1, k2)) having k1 = 1 and k2 = 1"; - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); - stmtExecutor.execute(); - Planner planner = stmtExecutor.planner(); - String plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan.contains("`k1` = 1")); - } - - @Test - public void testPushDownPredicateOnRollupAggregate() throws Exception { - String sql = "explain select k1, k2, count(distinct v1) from db1.tbl4" - + " group by rollup(k1, k2) having k1 = 1 and k2 = 1"; - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); - stmtExecutor.execute(); - Planner planner = stmtExecutor.planner(); - String plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan.contains("PREDICATES:")); - } - - @Test - public void testPushDownPredicateOnNormalAggregate() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1, k2, count(distinct v1) from db1.tbl4" - + " group by k1, k2 having k1 = 1 and k2 = 1"; - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); - stmtExecutor.execute(); - Planner planner = stmtExecutor.planner(); - String plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan.contains("(`k1` = 1) AND (`k2` = 1)")); - } - - @Test - public void testPushDownPredicateOnWindowFunction() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ v1, k1," - + " sum(v1) over (partition by k1 order by v1 rows between 1 preceding and 1 following)" - + " as 'moving total' from db1.tbl4 where k1 = 1"; - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, sql); - stmtExecutor.execute(); - Planner planner = stmtExecutor.planner(); - String plan = planner.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan.contains("`k1` = 1")); - } - - @Test - public void testRewriteNestedUnionStmt() throws Exception { - String qSQL = "SELECT k1 FROM db1.tbl5 WHERE k1 IN " - + "( SELECT k1 FROM ( SELECT k1 FROM db1.tbl5 ORDER BY k2 DESC, k1 DESC LIMIT 300 INTERSECT " - + "(SELECT k1 FROM db1.tbl5 ORDER BY k2 DESC, k1 DESC LIMIT 9 EXCEPT SELECT k1 " - + "FROM 
db1.tbl5 ORDER BY k2 DESC, k1 DESC LIMIT 2) ) t )"; - - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, qSQL); - stmtExecutor.execute(); - } - - @Test - public void testUpdateUnique() throws Exception { - String qSQL = "update db1.tbl6 set v1=5 where k1=5"; - UserIdentity user1 = new UserIdentity("cmy", "%"); - user1.setIsAnalyzed(); - // check user priv - connectContext.setCurrentUserIdentity(user1); - StmtExecutor stmtExecutor = new StmtExecutor(connectContext, qSQL); - stmtExecutor.execute(); - QueryState state = connectContext.getState(); - Assertions.assertEquals(MysqlStateType.ERR, state.getStateType()); - Assertions.assertTrue(state.getErrorMessage() - .contains("you need (at least one of) the (LOAD) privilege(s) for this operation")); - // set to admin user - connectContext.setCurrentUserIdentity(UserIdentity.ADMIN); - } - - @Test - public void testPushSortToOlapScan() throws Exception { - // Push sort fail without limit - String sql1 = "explain select k1 from db1.tbl3 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("SORT INFO:\n `k1`\n `k2`")); - Assertions.assertFalse(plan1.contains("SORT LIMIT:")); - - // Push sort fail limit > topnOptLimitThreshold - sql1 = "explain select k1 from db1.tbl3 order by k1, k2 limit " - + (connectContext.getSessionVariable().topnOptLimitThreshold + 1); - stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - planner1 = stmtExecutor1.planner(); - plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("SORT INFO:\n `k1`\n `k2`")); - Assertions.assertFalse(plan1.contains("SORT LIMIT:")); - - // Push sort success limit = topnOptLimitThreshold - sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl3 order by k1, k2 limit " - + (connectContext.getSessionVariable().topnOptLimitThreshold); - stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - planner1 = stmtExecutor1.planner(); - plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("SORT INFO:\n `k1`\n `k2`")); - Assertions.assertTrue(plan1.contains("SORT LIMIT:")); - - // Push sort success limit < topnOptLimitThreshold - if (connectContext.getSessionVariable().topnOptLimitThreshold > 1) { - sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl3 order by k1, k2 limit " - + (connectContext.getSessionVariable().topnOptLimitThreshold - 1); - stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - planner1 = stmtExecutor1.planner(); - plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("SORT INFO:\n `k1`\n `k2`")); - Assertions.assertTrue(plan1.contains("SORT LIMIT:")); - } - - // Push sort failed - String sql2 = "explain select k1, k2, k3 from db1.tbl3 order by k1, k3, k2"; - StmtExecutor stmtExecutor2 = new StmtExecutor(connectContext, sql2); - stmtExecutor2.execute(); - Planner planner2 = stmtExecutor2.planner(); - String plan2 = planner2.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan2.contains("SORT INFO:")); - Assertions.assertFalse(plan2.contains("SORT LIMIT:")); - } - - @Test - public 
void testEliminatingSortNode() throws Exception { - // fail case 1 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 = 1 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // fail case 2 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 = 1 and k3 = 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // fail case 3 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 = 1 and k2 != 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // fail case 4 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 = 1 or k2 = 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // fail case 5 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 = 1 and k2 = 2 or k3 = 3 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // fail case 6 - // TODO, support: in (select 1) - { - String sql1 = "explain select k1 from db1.tbl1 where k1 in (select 1) and k2 = 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // fail case 7 - { - String sql1 = "explain select k1 from db1.tbl1 where k1 not in (1) and k2 = 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("order by:")); - } - - // success case 1 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 = 1 and k2 = 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - 
Assertions.assertFalse(plan1.contains("order by:")); - } - - // success case 2 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k3 = 3 and k2 = 2 and k1 = 1 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("order by:")); - } - - // success case 3 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 in (1) and k2 in (2) and k2 !=2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("order by:")); - } - - // success case 4 - { - String sql1 = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where k1 in (concat('1','2')) and k2 = 2 order by k1, k2"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("order by:")); - } - - // success case 5 - { - String sql1 = "explain select tbl1.k1 from db1.tbl1 join db1.tbl2 on tbl1.k1 = tbl2.k1" - + " where tbl1.k1 = 1 and tbl2.k1 = 2 and tbl1.k2 = 3 order by tbl1.k1, tbl2.k1"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("order by:")); - } - } - - @Test - public void testInsertPlan() throws Exception { - FeConstants.runningUnitTest = true; - // 1. should not contains exchange node in old planner - boolean v = connectContext.getSessionVariable().isEnableNereidsPlanner(); - try { - connectContext.getSessionVariable().setEnableNereidsPlanner(false); - String sql1 = "explain insert into db1.tbl1 select * from db1.tbl1"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("VEXCHANGE")); - } finally { - connectContext.getSessionVariable().setEnableNereidsPlanner(v); - } - - // 2. 
should not contains exchange node in new planner - v = connectContext.getSessionVariable().isEnableNereidsPlanner(); - boolean v2 = connectContext.getSessionVariable().isEnableStrictConsistencyDml(); - try { - connectContext.getSessionVariable().setEnableNereidsPlanner(true); - connectContext.getSessionVariable().setEnableStrictConsistencyDml(false); - String sql1 = "explain insert into db1.tbl1 select * from db1.tbl1"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertFalse(plan1.contains("VEXCHANGE")); - } finally { - connectContext.getSessionVariable().setEnableNereidsPlanner(v); - connectContext.getSessionVariable().setEnableStrictConsistencyDml(v2); - } - - // 3. should contain exchange node in new planner if enable strict consistency dml - v = connectContext.getSessionVariable().isEnableNereidsPlanner(); - v2 = connectContext.getSessionVariable().isEnableStrictConsistencyDml(); - try { - connectContext.getSessionVariable().setEnableNereidsPlanner(true); - connectContext.getSessionVariable().setEnableStrictConsistencyDml(true); - String sql1 = "explain insert into db1.tbl1 select * from db1.tbl1"; - StmtExecutor stmtExecutor1 = new StmtExecutor(connectContext, sql1); - stmtExecutor1.execute(); - Planner planner1 = stmtExecutor1.planner(); - String plan1 = planner1.getExplainString(new ExplainOptions(false, false, false)); - Assertions.assertTrue(plan1.contains("VEXCHANGE")); - } finally { - connectContext.getSessionVariable().setEnableNereidsPlanner(v); - connectContext.getSessionVariable().setEnableStrictConsistencyDml(v2); - } - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/ProjectPlannerFunctionTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ProjectPlannerFunctionTest.java deleted file mode 100644 index 53198a63b15dfc..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ProjectPlannerFunctionTest.java +++ /dev/null @@ -1,111 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.planner; - -import org.apache.doris.analysis.CreateDbStmt; -import org.apache.doris.analysis.CreateTableStmt; -import org.apache.doris.catalog.Env; -import org.apache.doris.common.jmockit.Deencapsulation; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.utframe.UtFrameUtils; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.util.UUID; - -public class ProjectPlannerFunctionTest { - - private static String runningDir = "fe/mocked/ProjectPlannerFunctionTest/" + UUID.randomUUID().toString() + "/"; - - private static ConnectContext connectContext; - - @BeforeClass - public static void beforeClass() throws Exception { - UtFrameUtils.createDorisCluster(runningDir); - - // create connect context - connectContext = UtFrameUtils.createDefaultCtx(); - - // enable hash project - Deencapsulation.setField(connectContext.getSessionVariable(), "enableProjection", true); - - // create database - String createDbStmtStr = "create database test;"; - CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext); - Env.getCurrentEnv().createDb(createDbStmt); - String createTableStmtStr = "create table test.t1 (k1 int, k2 int) distributed by hash (k1) " - + "properties(\"replication_num\" = \"1\")"; - CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTableStmtStr, connectContext); - Env.getCurrentEnv().createTable(createTableStmt); - } - - @AfterClass - public static void tearDown() { - File file = new File(runningDir); - file.delete(); - } - - // keep a.k2 after a join b - @Test - public void projectByAgg() throws Exception { - String queryStr = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k2 from test.t1 a , test.t1 b where a.k1=b.k1 group by a.k2;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, queryStr); - Assert.assertTrue(explainString.contains("output slot ids: 0")); - } - - // keep a.k2 after a join b - @Test - public void projectBySort() throws Exception { - String queryStr = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k2 from test.t1 a , test.t1 b where a.k1=b.k1 order by a.k2;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, queryStr); - Assert.assertTrue(explainString.contains("output slot ids: 0")); - } - - // keep a.k2 after a join c - // keep a.k1, a.k2 after a join b - @Test - public void projectByJoin() throws Exception { - String queryStr = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k2 from test.t1 a inner join test.t1 b on a.k1=b.k1 " - + "inner join test.t1 c on a.k1=c.k1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, queryStr); - Assert.assertTrue(explainString.contains("output slot ids: 8")); - Assert.assertTrue(explainString.contains("output slot ids: 4 5")); - } - - // keep a.k2 after a join b - @Test - public void projectByResultExprs() throws Exception { - String queryStr = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k2 from test.t1 a , test.t1 b where a.k1=b.k1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, queryStr); - Assert.assertTrue(explainString.contains("output slot ids: 0")); - } - - // keep b.k1 after a join b - // keep a.k2, b.k1, b.k2 after hash table - @Test - public void projectHashTable() throws Exception { - String queryStr = 
"desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ b.k1 from test.t1 a right join test.t1 b on a.k1=b.k1 and b.k2>1 where a.k2>1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, queryStr); - Assert.assertTrue(explainString.contains("output slot ids: 1")); - Assert.assertTrue(explainString.contains("hash output slot ids: 1 2 3")); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java index 829d1b0d1e507c..cecd752644018d 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java @@ -44,8 +44,6 @@ public class QueryCacheNormalizerTest extends TestWithFeService { @Override protected void runBeforeAll() throws Exception { - connectContext.getSessionVariable().setEnableNereidsPlanner(true); - // Create database `db1`. createDatabase("db1"); @@ -114,7 +112,6 @@ protected void runBeforeAll() throws Exception { createTables(nonPart, part1, part2, multiLeveParts); - connectContext.getSessionVariable().setEnableNereidsPlanner(true); connectContext.getSessionVariable().setDisableNereidsRules("PRUNE_EMPTY_PARTITION"); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java deleted file mode 100644 index 36194494ee1da2..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java +++ /dev/null @@ -1,2300 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.planner; - -import org.apache.doris.analysis.Analyzer; -import org.apache.doris.analysis.CreateDbStmt; -import org.apache.doris.analysis.DropDbStmt; -import org.apache.doris.analysis.ExplainTest; -import org.apache.doris.analysis.Expr; -import org.apache.doris.analysis.InformationFunction; -import org.apache.doris.analysis.SelectStmt; -import org.apache.doris.analysis.ShowCreateDbStmt; -import org.apache.doris.analysis.StatementBase; -import org.apache.doris.catalog.Database; -import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.MaterializedIndex; -import org.apache.doris.catalog.MaterializedIndex.IndexExtState; -import org.apache.doris.catalog.OlapTable; -import org.apache.doris.catalog.Partition; -import org.apache.doris.catalog.Replica; -import org.apache.doris.catalog.ScalarType; -import org.apache.doris.catalog.Tablet; -import org.apache.doris.catalog.Type; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.FeConstants; -import org.apache.doris.common.jmockit.Deencapsulation; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.QueryState.MysqlStateType; -import org.apache.doris.rewrite.RewriteDateLiteralRuleTest; -import org.apache.doris.thrift.TRuntimeFilterType; -import org.apache.doris.utframe.TestWithFeService; -import org.apache.doris.utframe.UtFrameUtils; - -import com.google.common.collect.Lists; -import org.apache.commons.lang3.StringUtils; -import org.junit.Assert; -import org.junit.Ignore; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -import java.util.List; - -public class QueryPlanTest extends TestWithFeService { - - @Override - protected void runBeforeAll() throws Exception { - FeConstants.runningUnitTest = true; - // disable bucket shuffle join - Deencapsulation.setField(connectContext.getSessionVariable(), "enableBucketShuffleJoin", false); - connectContext.getSessionVariable().setEnableRuntimeFilterPrune(false); - // create database - createDatabase("test"); - connectContext.getSessionVariable().setEnableNereidsPlanner(false); - connectContext.getSessionVariable().setEnableFoldConstantByBe(false); - Config.enable_odbc_mysql_broker_table = true; - - createTable("create table test.test1\n" - + "(\n" - + " query_id varchar(48) comment \"Unique query id\",\n" - + " time_col datetime not null comment \"Query start time\",\n" - + " client_ip varchar(32) comment \"Client IP\",\n" - + " user varchar(64) comment \"User name\",\n" - + " db varchar(96) comment \"Database of this query\",\n" - + " state varchar(8) comment \"Query result state. EOF, ERR, OK\",\n" - + " query_time bigint comment \"Query execution time in millisecond\",\n" - + " scan_bytes bigint comment \"Total scan bytes of this query\",\n" - + " scan_rows bigint comment \"Total scan rows of this query\",\n" - + " return_rows bigint comment \"Returned rows of this query\",\n" - + " stmt_id int comment \"An incremental id of statement\",\n" - + " is_query tinyint comment \"Is this statemt a query. 
1 or 0\",\n" - + " frontend_ip varchar(32) comment \"Frontend ip of executing this statement\",\n" - + " stmt varchar(2048) comment \"The original statement, trimed if longer than 2048 bytes\"\n" - + ")\n" - + "partition by range(time_col) ()\n" - + "distributed by hash(query_id) buckets 1\n" - + "properties(\n" - + " \"dynamic_partition.time_unit\" = \"DAY\",\n" - + " \"dynamic_partition.start\" = \"-30\",\n" - + " \"dynamic_partition.end\" = \"3\",\n" - + " \"dynamic_partition.prefix\" = \"p\",\n" - + " \"dynamic_partition.buckets\" = \"1\",\n" - + " \"dynamic_partition.enable\" = \"true\",\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.bitmap_table (\n" - + " `id` int(11) NULL COMMENT \"\",\n" - + " `id2` bitmap bitmap_union \n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`id`)\n" - + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.join1 (\n" - + " `dt` int(11) COMMENT \"\",\n" - + " `id` int(11) COMMENT \"\",\n" - + " `value` varchar(8) COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`dt`, `id`)\n" - + "PARTITION BY RANGE(`dt`)\n" - + "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" - + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.join2 (\n" - + " `dt` int(11) COMMENT \"\",\n" - + " `id` int(11) COMMENT \"\",\n" - + " `value` varchar(8) COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`dt`, `id`)\n" - + "PARTITION BY RANGE(`dt`)\n" - + "(PARTITION p1 VALUES LESS THAN (\"10\"))\n" - + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.bitmap_table_2 (\n" - + " `id` int(11) NULL COMMENT \"\",\n" - + " `id2` bitmap bitmap_union ,\n" - + " `id3` bitmap bitmap_union \n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`id`)\n" - + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.hll_table (\n" - + " `id` int(11) NULL COMMENT \"\",\n" - + " `id2` hll hll_union \n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`id`)\n" - + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.`bigtable` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\",\n" - + " `k3` int(11) NULL COMMENT \"\",\n" - + " `k4` bigint(20) NULL COMMENT \"\",\n" - + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" - + " `k6` char(5) NULL COMMENT \"\",\n" - + " `k10` date NULL COMMENT \"\",\n" - + " `k11` datetime NULL COMMENT \"\",\n" - + " `k7` varchar(20) NULL COMMENT \"\",\n" - + " `k8` double MAX NULL COMMENT \"\",\n" - + " `k9` float SUM NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.`baseall` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\",\n" - + " `k3` int(11) NULL COMMENT \"\",\n" - + " `k4` bigint(20) NULL COMMENT \"\",\n" - + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" - + " `k6` char(5) NULL COMMENT \"\",\n" - + " `k10` date NULL COMMENT \"\",\n" - + " `k11` datetime NULL COMMENT \"\",\n" - + " `k7` 
varchar(20) NULL COMMENT \"\",\n" - + " `k8` double MAX NULL COMMENT \"\",\n" - + " `k9` float SUM NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.`dynamic_partition` (\n" - + " `k1` date NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\",\n" - + " `k3` int(11) NULL COMMENT \"\",\n" - + " `k4` bigint(20) NULL COMMENT \"\",\n" - + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" - + " `k6` char(5) NULL COMMENT \"\",\n" - + " `k10` date NULL COMMENT \"\",\n" - + " `k11` datetime NULL COMMENT \"\",\n" - + " `k7` varchar(20) NULL COMMENT \"\",\n" - + " `k8` double MAX NULL COMMENT \"\",\n" - + " `k9` float SUM NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, `k10`, `k11`, `k7`)\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE (k1)\n" - + "(\n" - + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" - + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" - + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" - + ")\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"dynamic_partition.enable\" = \"true\",\n" - + "\"dynamic_partition.start\" = \"-3\",\n" - + "\"dynamic_partition.end\" = \"3\",\n" - + "\"dynamic_partition.time_unit\" = \"day\",\n" - + "\"dynamic_partition.prefix\" = \"p\",\n" - + "\"dynamic_partition.buckets\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.`app_profile` (\n" - + " `event_date` date NOT NULL COMMENT \"\",\n" - + " `app_name` varchar(64) NOT NULL COMMENT \"\",\n" - + " `package_name` varchar(64) NOT NULL COMMENT \"\",\n" - + " `age` varchar(32) NOT NULL COMMENT \"\",\n" - + " `gender` varchar(32) NOT NULL COMMENT \"\",\n" - + " `level` varchar(64) NOT NULL COMMENT \"\",\n" - + " `city` varchar(64) NOT NULL COMMENT \"\",\n" - + " `model` varchar(64) NOT NULL COMMENT \"\",\n" - + " `brand` varchar(64) NOT NULL COMMENT \"\",\n" - + " `hours` varchar(16) NOT NULL COMMENT \"\",\n" - + " `use_num` int(11) SUM NOT NULL COMMENT \"\",\n" - + " `use_time` double SUM NOT NULL COMMENT \"\",\n" - + " `start_times` bigint(20) SUM NOT NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, " - + "`city`, `model`, `brand`, `hours`) COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE(`event_date`)\n" - + "(PARTITION p_20200301 VALUES [('2020-02-27'), ('2020-03-02')),\n" - + "PARTITION p_20200306 VALUES [('2020-03-02'), ('2020-03-07')))\n" - + "DISTRIBUTED BY HASH(`event_date`, `app_name`, `package_name`, `age`, `gender`, `level`, " - + "`city`, `model`, `brand`, `hours`) BUCKETS 1\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.`pushdown_test` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\",\n" - + " `k3` int(11) NULL COMMENT \"\",\n" - + " `k4` bigint(20) NULL COMMENT \"\",\n" - + " `k5` decimal(9, 3) NULL COMMENT \"\",\n" - + " `k6` char(5) NULL COMMENT \"\",\n" - + " `k10` date NULL COMMENT \"\",\n" - + " `k11` datetime NULL COMMENT \"\",\n" - + " `k7` varchar(20) NULL COMMENT \"\",\n" - + " `k8` double MAX NULL COMMENT \"\",\n" - + " `k9` float SUM NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`, 
`k10`, `k11`, `k7`)\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE(`k1`)\n" - + "(PARTITION p1 VALUES [(\"-128\"), (\"-64\")),\n" - + "PARTITION p2 VALUES [(\"-64\"), (\"0\")),\n" - + "PARTITION p3 VALUES [(\"0\"), (\"64\")))\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"DEFAULT\"\n" - + ");"); - - createTable("create table test.jointest\n" - + "(k1 int, k2 int) distributed by hash(k1) buckets 1\n" - + "properties(\"replication_num\" = \"1\");"); - - createTable("create table test.bucket_shuffle1\n" - + "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 5\n" - + "properties(\"replication_num\" = \"1\"" - + ");"); - - createTable("CREATE TABLE test.`bucket_shuffle2` (\n" - + " `k1` int NULL COMMENT \"\",\n" - + " `k2` int(6) NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE(`k1`)\n" - + "(PARTITION p1 VALUES [(\"-128\"), (\"-64\")),\n" - + "PARTITION p2 VALUES [(\"-64\"), (\"0\")),\n" - + "PARTITION p3 VALUES [(\"0\"), (\"64\")))\n" - + "DISTRIBUTED BY HASH(k1, k2) BUCKETS 5\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"DEFAULT\"\n" - + ");"); - - createTable("create table test.colocate1\n" - + "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 1\n" - + "properties(\"replication_num\" = \"1\"," - + "\"colocate_with\" = \"group1\");"); - - createTable("create table test.colocate2\n" - + "(k1 int, k2 int, k3 int) distributed by hash(k1, k2) buckets 1\n" - + "properties(\"replication_num\" = \"1\"," - + "\"colocate_with\" = \"group1\");"); - - createTable("create external table test.mysql_table\n" - + "(k1 int, k2 int)\n" - + "ENGINE=MYSQL\n" - + "PROPERTIES (\n" - + "\"host\" = \"127.0.0.1\",\n" - + "\"port\" = \"3306\",\n" - + "\"user\" = \"root\",\n" - + "\"password\" = \"123\",\n" - + "\"database\" = \"db1\",\n" - + "\"table\" = \"tbl1\"\n" - + ");"); - - createTable("CREATE TABLE test.`table_partitioned` (\n" - + " `dt` int(11) NOT NULL COMMENT \"\",\n" - + " `dis_key` varchar(20) NOT NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`dt`, `dis_key`)\n" - + "PARTITION BY RANGE(`dt`)\n" - + "(PARTITION p20200101 VALUES [(\"-1\"), (\"20200101\")),\n" - + "PARTITION p20200201 VALUES [(\"20200101\"), (\"20200201\")))\n" - + "DISTRIBUTED BY HASH(`dt`, `dis_key`) BUCKETS 2\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");"); - - createTable("CREATE TABLE test.`table_unpartitioned` (\n" - + " `dt` int(11) NOT NULL COMMENT \"\",\n" - + " `dis_key` varchar(20) NOT NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`dt`, `dis_key`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`dt`, `dis_key`) BUCKETS 2\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");"); - - createTable("create external table test.odbc_oracle\n" - + "(k1 float, k2 int)\n" - + "ENGINE=ODBC\n" - + "PROPERTIES (\n" - + "\"host\" = \"127.0.0.1\",\n" - + "\"port\" = \"3306\",\n" - + "\"user\" = \"root\",\n" - + "\"password\" = \"123\",\n" - + "\"database\" = \"db1\",\n" - + "\"table\" = \"tbl1\",\n" - + "\"driver\" = \"Oracle Driver\",\n" - + "\"odbc_type\" = \"oracle\"\n" - + ");"); - - createTable("create external table test.odbc_mysql\n" - + "(k1 int, k2 int)\n" - + "ENGINE=ODBC\n" - + "PROPERTIES (\n" - + "\"host\" = \"127.0.0.1\",\n" - + "\"port\" = \"3306\",\n" - + "\"user\" = \"root\",\n" - + 
"\"password\" = \"123\",\n" - + "\"database\" = \"db1\",\n" - + "\"table\" = \"tbl1\",\n" - + "\"driver\" = \"Oracle Driver\",\n" - + "\"odbc_type\" = \"mysql\"\n" - + ");"); - - createTable("create table test.tbl_int_date (" - + "`date` datetime NULL," - + "`day` date NULL," - + "`site_id` int(11) NULL )" - + " ENGINE=OLAP " - + "DUPLICATE KEY(`date`, `day`, `site_id`)" - + "DISTRIBUTED BY HASH(`site_id`) BUCKETS 10 " - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ");"); - - createView("create view test.tbl_null_column_view AS SELECT *,NULL as add_column FROM test.test1;"); - - createView("create view test.function_view AS SELECT query_id, client_ip, concat(user, db) as" - + " concat FROM test.test1;"); - - createTable("create table test.tbl_using_a\n" - + "(\n" - + " k1 int,\n" - + " k2 int,\n" - + " v1 int sum\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 3 " - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"" - + ");"); - - createTable("create table test.tbl_using_b\n" - + "(\n" - + " k1 int,\n" - + " k2 int,\n" - + " k3 int \n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 3 " - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"" - + ");"); - } - - @Test - public void testFunctionViewGroupingSet() throws Exception { - String queryStr = "select /*+ SET_VAR(enable_nereids_planner=false) */ query_id, client_ip, concat from test.function_view group by rollup(" - + "query_id, client_ip, concat);"; - assertSQLPlanOrErrorMsgContains(queryStr, "repeat: repeat 3 lines [[], [8], [8, 9], [8, 9, 10]]"); - } - - @Test - public void testBitmapInsertInto() throws Exception { - String sql = "INSERT INTO test.bitmap_table (id, id2) VALUES (1001, to_bitmap(1000)), (1001, to_bitmap(2000));"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("OLAP TABLE SINK")); - - sql = "insert into test.bitmap_table select id, bitmap_union(id2) from test.bitmap_table_2 group by id;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("OLAP TABLE SINK")); - Assert.assertTrue(explainString.contains("bitmap_union")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "AGGREGATE")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 0, "OlapScanNode")); - - sql = "insert into test.bitmap_table select /*+ SET_VAR(enable_nereids_planner=false) */ id, id2 from test.bitmap_table_2;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("OLAP TABLE SINK")); - Assert.assertTrue(explainString.contains("OUTPUT EXPRS:\n `id`\n `id2`")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 0, "OlapScanNode")); - - assertSQLPlanOrErrorMsgContains("insert into test.bitmap_table select id, id from test.bitmap_table_2;", - "bitmap column require the function return type is BITMAP"); - } - - @Test - public void testBitmapQuery() throws Exception { - assertSQLPlanOrErrorMsgContains( - "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.bitmap_table;", - "OUTPUT EXPRS:\n `test`.`bitmap_table`.`id`\n `test`.`bitmap_table`.`id2`" - ); - - assertSQLPlanOrErrorMsgContains( - "select count(id2) from test.bitmap_table;", - "No matching function with signature" - ); - - assertSQLPlanOrErrorMsgContains( - "select group_concat(id2) from test.bitmap_table;", - "group_concat requires first parameter to be of type 
STRING: group_concat(`id2`)" - ); - - assertSQLPlanOrErrorMsgContains( - "select sum(id2) from test.bitmap_table;", - "sum requires a numeric parameter: sum(`id2`)" - ); - - assertSQLPlanOrErrorMsgContains( - "select avg(id2) from test.bitmap_table;", - "avg requires a numeric parameter: avg(`id2`)" - ); - - assertSQLPlanOrErrorMsgContains( - "select max(id2) from test.bitmap_table;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select min(id2) from test.bitmap_table;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select count(*) from test.bitmap_table group by id2;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select count(*) from test.bitmap_table where id2 = 1;", - "Unsupported bitmap type in expression: (`id2` = 1)" - ); - - } - - @Test - public void testHLLTypeQuery() throws Exception { - assertSQLPlanOrErrorMsgContains( - "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.hll_table;", - "OUTPUT EXPRS:\n `test`.`hll_table`.`id`\n `test`.`hll_table`.`id2`" - ); - - assertSQLPlanOrErrorMsgContains( - "select count(id2) from test.hll_table;", - "No matching function with signature" - ); - - assertSQLPlanOrErrorMsgContains( - "select group_concat(id2) from test.hll_table;", - "group_concat requires first parameter to be of type STRING: group_concat(`id2`)" - ); - - assertSQLPlanOrErrorMsgContains( - "select sum(id2) from test.hll_table;", - "sum requires a numeric parameter: sum(`id2`)" - ); - - assertSQLPlanOrErrorMsgContains( - "select avg(id2) from test.hll_table;", - "avg requires a numeric parameter: avg(`id2`)" - ); - - assertSQLPlanOrErrorMsgContains( - "select max(id2) from test.hll_table;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select min(id2) from test.hll_table;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select min(id2) from test.hll_table;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select count(*) from test.hll_table group by id2;", - Type.OnlyMetricTypeErrorMsg - ); - - assertSQLPlanOrErrorMsgContains( - "select count(*) from test.hll_table where id2 = 1", - "Hll type dose not support operand: (`id2` = 1)" - ); - } - - @Test - public void testTypeCast() throws Exception { - // cmy: this test may sometimes failed in our daily test env, so I add a case here. 
- String sql = "select * from test.baseall a where k1 in (select k1 from test.bigtable b where k2 > 0 and k1 = 1);"; - getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertEquals(MysqlStateType.EOF, connectContext.getState().getStateType()); - - sql = "SHOW VARIABLES LIKE 'lower_case_%'; SHOW VARIABLES LIKE 'sql_mode'"; - List stmts = parseAndAnalyzeStmts(sql); - Assert.assertEquals(2, stmts.size()); - - // disable cast hll/bitmap to string - assertSQLPlanOrErrorMsgContains( - "select cast(id2 as varchar) from test.hll_table;", - "Invalid type cast of `id2` from hll to varchar(65533)" - ); - assertSQLPlanOrErrorMsgContains( - "select cast(id2 as varchar) from test.bitmap_table;", - "Invalid type cast of `id2` from bitmap to varchar(65533)" - ); - // disable implicit cast hll/bitmap to string - assertSQLPlanOrErrorMsgContains( - "select length(id2) from test.hll_table;", - "No matching function with signature: length(hll)" - ); - assertSQLPlanOrErrorMsgContains( - "select length(id2) from test.bitmap_table;", - "No matching function with signature: length(bitmap)" - ); - } - - @Test - public void testMultiStmts() throws Exception { - String sql = "SHOW VARIABLES LIKE 'lower_case_%'; SHOW VARIABLES LIKE 'sql_mode'"; - List stmts = parseAndAnalyzeStmts(sql); - Assert.assertEquals(2, stmts.size()); - - sql = "SHOW VARIABLES LIKE 'lower_case_%';;;"; - stmts = parseAndAnalyzeStmts(sql); - Assert.assertEquals(1, stmts.size()); - - sql = "SHOW VARIABLES LIKE 'lower_case_%';;;SHOW VARIABLES LIKE 'lower_case_%';"; - stmts = parseAndAnalyzeStmts(sql); - Assert.assertEquals(4, stmts.size()); - - sql = "SHOW VARIABLES LIKE 'lower_case_%'"; - stmts = parseAndAnalyzeStmts(sql); - Assert.assertEquals(1, stmts.size()); - } - - @Test - public void testCountDistinctRewrite() throws Exception { - String sql = "select count(distinct id) from test.bitmap_table"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("output: count")); - - sql = "select count(distinct id2) from test.bitmap_table"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select sum(id) / count(distinct id2) from test.bitmap_table"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select count(distinct id2) from test.hll_table"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("hll_union_agg")); - - sql = "select sum(id) / count(distinct id2) from test.hll_table"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("hll_union_agg")); - - sql = "select count(distinct id2) from test.bitmap_table group by id order by count(distinct id2)"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select count(distinct id2) from test.bitmap_table having count(distinct id2) > 0"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select count(distinct id2) from test.bitmap_table order by count(distinct id2)"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select count(distinct if(id = 1, id2, null)) from test.bitmap_table"; - explainString = getSQLPlanOrErrorMsg("explain " 
+ sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select count(distinct ifnull(id2, id3)) from test.bitmap_table_2"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - sql = "select count(distinct coalesce(id2, id3)) from test.bitmap_table_2"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("bitmap_union_count")); - - ConnectContext.get().getSessionVariable().setRewriteCountDistinct(false); - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ count(distinct id2) from test.bitmap_table"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("No matching function with signature")); - } - - @Test - public void testCreateDbQueryPlanWithSchemaSyntax() throws Exception { - String createSchemaSql = "create schema if not exists test"; - String createDbSql = "create database if not exists test"; - CreateDbStmt createSchemaStmt = (CreateDbStmt) parseAndAnalyzeStmt(createSchemaSql); - CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbSql); - Assert.assertEquals(createDbStmt.toSql(), createSchemaStmt.toSql()); - } - - @Test - public void testDropDbQueryPlanWithSchemaSyntax() throws Exception { - String dropSchemaSql = "drop schema if exists test"; - String dropDbSql = "drop database if exists test"; - DropDbStmt dropSchemaStmt = (DropDbStmt) parseAndAnalyzeStmt(dropSchemaSql); - DropDbStmt dropDbStmt = (DropDbStmt) parseAndAnalyzeStmt(dropDbSql); - Assert.assertEquals(dropDbStmt.toSql(), dropSchemaStmt.toSql()); - } - - @Test - public void testShowCreateDbQueryPlanWithSchemaSyntax() throws Exception { - String showCreateSchemaSql = "show create schema test"; - String showCreateDbSql = "show create database test"; - ShowCreateDbStmt showCreateSchemaStmt = (ShowCreateDbStmt) parseAndAnalyzeStmt(showCreateSchemaSql); - ShowCreateDbStmt showCreateDbStmt = (ShowCreateDbStmt) parseAndAnalyzeStmt(showCreateDbSql); - Assert.assertEquals(showCreateDbStmt.toSql(), showCreateSchemaStmt.toSql()); - } - - @Test - public void testDateTypeCastSyntax() throws Exception { - String castSql = "select * from test.baseall where k11 < cast('2020-03-26' as date)"; - SelectStmt selectStmt = (SelectStmt) parseAndAnalyzeStmt(castSql); - Expr rightExpr = selectStmt.getWhereClause().getChildren().get(1); - Assert.assertEquals(rightExpr.getType(), ScalarType.getDefaultDateType(Type.DATETIME)); - - String castSql2 = "select /*+ SET_VAR(enable_nereids_planner=false) */ str_to_date('11/09/2011', '%m/%d/%Y');"; - String explainString = getSQLPlanOrErrorMsg("explain " + castSql2); - Assert.assertTrue(explainString.contains("2011-11-09")); - Assert.assertFalse(explainString.contains("2011-11-09 00:00:00")); - } - - @Test - public void testJoinPredicateTransitivity() throws Exception { - connectContext.setDatabase("test"); - - ConnectContext.get().getSessionVariable().setEnableInferPredicate(true); - /* TODO: commit on_clause and where_clause Cross-identification - // test left join : left table where binary predicate - String sql = "select join1.id\n" - + "from join1\n" - + "left join join2 on join1.id = join2.id\n" - + "where join1.id > 1;"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); - - // test left join: left table where 
in predicate - sql = "select join1.id\n" - + "from join1\n" - + "left join join2 on join1.id = join2.id\n" - + "where join1.id in (2);"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` IN (2)")); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` IN (2)")); - - // test left join: left table where between predicate - sql = "select join1.id\n" - + "from join1\n" - + "left join join2 on join1.id = join2.id\n" - + "where join1.id BETWEEN 1 AND 2;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` >= 1, `join1`.`id` <= 2")); - Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` >= 1, `join2`.`id` <= 2")); - - */ - // test left join: left table join predicate, left table couldn't push down - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "left join join2 on join1.id = join2.id\n" - + "and join1.id > 1;"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains(" > 1")); - Assert.assertFalse(explainString.contains("`join1`.`id` > 1")); - - /* - // test left join: right table where predicate. - // If we eliminate outer join, we could push predicate down to join1 and join2. - // Currently, we push predicate to join1 and keep join predicate for join2 - sql = "select *\n from join1\n" - + "left join join2 on join1.id = join2.id\n" - + "where join2.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); - Assert.assertFalse(explainString.contains("other join predicates: `join2`.`id` > 1")); - */ - - // test left join: right table join predicate, only push down right table - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "left join join2 on join1.id = join2.id\n" - + "and join2.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("`join2`.`id` > 1")); - Assert.assertFalse(explainString.contains("`join1`.`id` > 1")); - - /* - // test inner join: left table where predicate, both push down left table and right table - sql = "select *\n from join1\n" - + "join join2 on join1.id = join2.id\n" - + "where join1.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); - Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); - */ - - // test inner join: left table join predicate, both push down left table and right table - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "join join2 on join1.id = join2.id\n" - + "and join1.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("`join1`.`id` > 1")); - Assert.assertTrue(explainString.contains("`join2`.`id` > 1")); - - /* - // test inner join: right table where predicate, both push down left table and right table - sql = "select *\n from join1\n" - + "join join2 on join1.id = join2.id\n" - + "where join2.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); - Assert.assertTrue(explainString.contains("PREDICATES: `join2`.`id` > 1")); - */ - - // test inner join: right table join predicate, both push down left table and right 
table - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - - + "join join2 on join1.id = join2.id\n" + "and 1 < join2.id;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("`join1`.`id` > 1")); - Assert.assertTrue(explainString.contains("`join2`.`id` > 1")); - - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "join join2 on join1.id = join2.value\n" - + "and join2.value in ('abc');"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertFalse(explainString.contains("'abc' is not a number")); - Assert.assertFalse(explainString.contains("`join1`.`value` IN ('abc')")); - - // test anti join, right table join predicate, only push to right table - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "left anti join join2 on join1.id = join2.id\n" - + "and join2.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("`join2`.`id` > 1")); - Assert.assertFalse(explainString.contains("`join1`.`id` > 1")); - - // test semi join, right table join predicate, only push to right table - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "left semi join join2 on join1.id = join2.id\n" - + "and join2.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("`join2`.`id` > 1")); - Assert.assertTrue(explainString.contains("`join1`.`id` > 1")); - - // test anti join, left table join predicate, left table couldn't push down - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "left anti join join2 on join1.id = join2.id\n" - + "and join1.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains(" > 1")); - Assert.assertFalse(explainString.contains("`join1`.`id` > 1")); - - // test semi join, left table join predicate, only push to left table - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ *\n from join1\n" - + "left semi join join2 on join1.id = join2.id\n" - + "and join1.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("`join1`.`id` > 1")); - - /* - // test anti join, left table where predicate, only push to left table - sql = "select join1.id\n" - + "from join1\n" - + "left anti join join2 on join1.id = join2.id\n" - + "where join1.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); - Assert.assertFalse(explainString.contains("PREDICATES: `join2`.`id` > 1")); - - // test semi join, left table where predicate, only push to left table - sql = "select join1.id\n" - + "from join1\n" - + "left semi join join2 on join1.id = join2.id\n" - + "where join1.id > 1;"; - explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `join1`.`id` > 1")); - Assert.assertFalse(explainString.contains("PREDICATES: `join2`.`id` > 1")); - */ - } - - @Disabled - public void testConvertCaseWhenToConstant() throws Exception { - // basic test - String caseWhenSql = "select " - + "case when date_format(now(),'%H%i') < 123 then 1 else 0 end as col " - + "from test.test1 " - + "where time_col = case when date_format(now(),'%H%i') < 123 then date_format(date_sub(" - + "now(),2),'%Y%m%d') else date_format(date_sub(now(),1),'%Y%m%d') end"; - 
Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + caseWhenSql), - "CASE WHEN")); - - // test 1: case when then - // 1.1 multi when in on `case when` and can be converted to constants - String sql11 = "select case when false then 2 when true then 3 else 0 end as col11;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql11), - "constant exprs: \n 3")); - - // 1.2 multi `when expr` in on `case when` ,`when expr` can not be converted to constants - String sql121 = "select case when false then 2 when substr(k7,2,1) then 3 else 0 end as col121 from" - + " test.baseall"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql121), - "OUTPUT EXPRS:\n CASE WHEN substr(`k7`, 2, 1) THEN 3 ELSE 0 END")); - - // 1.2.2 when expr which can not be converted to constants in the first - String sql122 = "select case when substr(k7,2,1) then 2 when false then 3 else 0 end as col122" - + " from test.baseall"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql122), - "OUTPUT EXPRS:\n CASE WHEN substr(`k7`, 2, 1) THEN 2 WHEN FALSE THEN 3 ELSE 0 END")); - - // 1.2.3 test return `then expr` in the middle - String sql124 = "select case when false then 1 when true then 2 when false then 3 else 'other' end as col124"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql124), - "constant exprs: \n '2'")); - - // 1.3 test return null - String sql3 = "select case when false then 2 end as col3"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql3), - "constant exprs: \n NULL")); - - // 1.3.1 test return else expr - String sql131 = "select case when false then 2 when false then 3 else 4 end as col131"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql131), - "constant exprs: \n 4")); - - // 1.4 nest `case when` and can be converted to constants - String sql14 = "select case when (case when true then true else false end) then 2 when false then 3 else 0 end as col"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql14), - "constant exprs: \n 2")); - - // 1.5 nest `case when` and can not be converted to constants - String sql15 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case when case when substr(k7,2,1) then true else false end then 2 when false then 3" - + " else 0 end as col from test.baseall"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql15), - "OUTPUT EXPRS:\n CASE WHEN CASE WHEN substr(`k7`, 2, 1) THEN TRUE ELSE FALSE END THEN 2" - + " WHEN FALSE THEN 3 ELSE 0 END")); - - // 1.6 test when expr is null - String sql16 = "select case when null then 1 else 2 end as col16;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql16), - "constant exprs: \n 2")); - - // test 2: case xxx when then - // 2.1 test equal - String sql2 = "select case 1 when 1 then 'a' when 2 then 'b' else 'other' end as col2;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql2), - "constant exprs: \n 'a'")); - - // 2.1.2 test not equal - String sql212 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case 'a' when 1 then 'a' when 'a' then 'b' else 'other' end as col212;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql212), - "constant exprs: \n 'b'")); - - // 2.2 test return null - String 
sql22 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case 'a' when 1 then 'a' when 'b' then 'b' end as col22;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql22), - "constant exprs: \n NULL")); - - // 2.2.2 test return else - String sql222 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case 1 when 2 then 'a' when 3 then 'b' else 'other' end as col222;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql222), - "constant exprs: \n 'other'")); - - // 2.3 test can not convert to constant,middle when expr is not constant - String sql23 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case 'a' when 'b' then 'a' when substr(k7,2,1) then 2 when false then 3" - + " else 0 end as col23 from test.baseall"; - String a = getSQLPlanOrErrorMsg("explain " + sql23); - Assert.assertTrue(StringUtils.containsIgnoreCase(a, - "OUTPUT EXPRS:\n CASE 'a' WHEN substr(`k7`, 2, 1) THEN '2' WHEN '0' THEN '3' ELSE '0' END")); - - // 2.3.1 first when expr is not constant - String sql231 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case 'a' when substr(k7,2,1) then 2 when 1 then 'a' when false then 3 else 0 end" - + " as col231 from test.baseall"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql231), - "OUTPUT EXPRS:\n CASE 'a' WHEN substr(`k7`, 2, 1) THEN '2' WHEN '1' THEN 'a' WHEN '0'" - + " THEN '3' ELSE '0' END")); - - // 2.3.2 case expr is not constant - String sql232 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case k1 when substr(k7,2,1) then 2 when 1 then 'a' when false then 3 else 0 end" - + " as col232 from test.baseall"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql232), - "OUTPUT EXPRS:\n CASE `k1` WHEN substr(`k7`, 2, 1) THEN '2' WHEN '1' THEN 'a' " - + "WHEN '0' THEN '3' ELSE '0' END")); - - // 3.1 test float,float in case expr - String sql31 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case cast(100 as float) when 1 then 'a' when 2 then 'b' else 'other' end as col31;"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql31), - "constant exprs: \n CASE 100 WHEN 1 THEN 'a' WHEN 2 THEN 'b' ELSE 'other' END")); - - // 4.1 test null in case expr return else - String sql41 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case null when 1 then 'a' when 2 then 'b' else 'other' end as col41"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql41), - "constant exprs: \n 'other'")); - - // 4.1.2 test null in case expr return null - String sql412 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case null when 1 then 'a' when 2 then 'b' end as col41"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql412), - "constant exprs: \n NULL")); - - // 4.2.1 test null in when expr - String sql421 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case 'a' when null then 'a' else 'other' end as col421"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql421), - "constant exprs: \n 'other'")); - - // 5.1 test same type in then expr and else expr - String sql51 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case when 132 then k7 else 'all' end as col51 from test.baseall group by col51"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql51), - "CASE WHEN 132 THEN `k7` ELSE 'all' END")); - - // 5.2 test same type 
in then expr and else expr - String sql52 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case when 2 < 1 then 'all' else k7 end as col52 from test.baseall group by col52"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql52), - "`k7`")); - - // 5.3 test different type in then expr and else expr, and return CastExpr - String sql53 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case when 2 < 1 then 'all' else k1 end as col53 from test.baseall group by col53"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql53), - "`k1`")); - - // 5.4 test return CastExpr with other SlotRef in selectListItem - String sql54 = "select /*+ SET_VAR(enable_nereids_planner=false) */ k2, case when 2 < 1 then 'all' else k1 end as col54, k7 from test.baseall" - + " group by k2, col54, k7"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql54), - "OUTPUT EXPRS:\n `k2`\n `k1`\n `k7`")); - - // 5.5 test return CastExpr> with other SlotRef in selectListItem - String sql55 = "select /*+ SET_VAR(enable_nereids_planner=false) */ case when 2 < 1 then 'all' else cast(k1 as int) end as col55, k7 from" - + " test.baseall group by col55, k7"; - Assert.assertTrue(StringUtils.containsIgnoreCase(getSQLPlanOrErrorMsg("explain " + sql55), - "OUTPUT EXPRS:\n CAST(`k1` AS INT)\n `k7`")); - } - - @Test - public void testJoinPredicateTransitivityWithSubqueryInWhereClause() throws Exception { - connectContext.setDatabase("test"); - String sql = "SELECT *\n" - + "FROM test.pushdown_test\n" - + "WHERE 0 < (\n" - + " SELECT MAX(k9)\n" + " FROM test.pushdown_test);"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PLAN FRAGMENT")); - Assert.assertTrue(explainString.contains("NESTED LOOP JOIN")); - Assert.assertTrue(!explainString.contains("PREDICATES") || explainString.contains("PREDICATES: TRUE")); - } - - @Test - public void testDistinctPushDown() throws Exception { - connectContext.setDatabase("test"); - String sql = "select distinct k1 from (select distinct k1 from test.pushdown_test) t where k1 > 1"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assert.assertTrue(explainString.contains("PLAN FRAGMENT")); - } - - @Test - public void testConstInPartitionPrune() throws Exception { - FeConstants.runningUnitTest = true; - String queryStr = "explain select * from (select 'aa' as kk1, sum(id) from test.join1 where dt = 9" - + " group by kk1)tt where kk1 in ('aa');"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - FeConstants.runningUnitTest = false; - Assert.assertTrue(explainString.contains("partitions=1/1")); - } - - @Test - public void testOrCompoundPredicateFold() throws Exception { - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from baseall where (k1 > 1) or (k1 > 1 and k2 < 1)"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("`k1` > 1")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from baseall where (k1 > 1 and k2 < 1) or (k1 > 1)"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("`k1` > 1")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from baseall where (k1 > 1) or (k1 > 1)"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("`k1` > 1")); - } - - @Test - public void 
testColocateJoin() throws Exception { - FeConstants.runningUnitTest = true; - - String queryStr = "explain select * from test.colocate1 t1, test.colocate2 t2 where t1.k1 = t2.k1 and" - + " t1.k2 = t2.k2 and t1.k3 = t2.k3"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains(ColocatePlanTest.COLOCATE_ENABLE)); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.colocate1 t1 join [shuffle] test.colocate2 t2 on t1.k1 = t2.k1 and t1.k2 = t2.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains(ColocatePlanTest.COLOCATE_ENABLE)); - - // t1.k1 = t2.k2 not same order with distribute column - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.colocate1 t1, test.colocate2 t2 where t1.k1 = t2.k2 and t1.k2 = t2.k1 and t1.k3 = t2.k3"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains(ColocatePlanTest.COLOCATE_ENABLE)); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.colocate1 t1, test.colocate2 t2 where t1.k2 = t2.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains(ColocatePlanTest.COLOCATE_ENABLE)); - } - - @Test - public void testSelfColocateJoin() throws Exception { - FeConstants.runningUnitTest = true; - - // single partition - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.jointest t1, test.jointest t2 where t1.k1 = t2.k1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains(ColocatePlanTest.COLOCATE_ENABLE)); - - // multi partition, should not be colocate - queryStr = "explain select * from test.dynamic_partition t1, test.dynamic_partition t2 where t1.k1 = t2.k1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains(ColocatePlanTest.COLOCATE_ENABLE)); - } - - @Test - public void testBucketShuffleJoin() throws Exception { - FeConstants.runningUnitTest = true; - // enable bucket shuffle join - Deencapsulation.setField(connectContext.getSessionVariable(), "enableBucketShuffleJoin", true); - - // set data size and row count for the olap table - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("test"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("bucket_shuffle1"); - for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(10000); - for (Tablet tablet : mIndex.getTablets()) { - for (Replica replica : tablet.getReplicas()) { - replica.updateVersion(2); - } - } - } - } - - db = Env.getCurrentInternalCatalog().getDbOrMetaException("test"); - tbl = (OlapTable) db.getTableOrMetaException("bucket_shuffle2"); - for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(10000); - for (Tablet tablet : mIndex.getTablets()) { - for (Replica replica : tablet.getReplicas()) { - replica.updateVersion(2); - } - } - } - } - - // single partition - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.jointest t1, test.bucket_shuffle1 t2 where t1.k1 = t2.k1" - + " and t1.k1 = t2.k2"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - 
Assert.assertTrue(explainString.contains("BUCKET_SHFFULE")); - Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); - - // not bucket shuffle join do not support different type - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.jointest t1, test.bucket_shuffle1 t2 where cast (t1.k1 as tinyint)" - + " = t2.k1 and t1.k1 = t2.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); - - // left table distribution column not match - queryStr = "explain select * from test.jointest t1, test.bucket_shuffle1 t2 where t1.k1 = t2.k1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); - - // multi partition, should not be bucket shuffle join - queryStr = "explain select * from test.jointest t1, test.bucket_shuffle2 t2 where t1.k1 = t2.k1" - + " and t1.k1 = t2.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); - - // left table is colocate table, should be bucket shuffle - queryStr = "explain select * from test.colocate1 t1, test.bucket_shuffle2 t2 where t1.k1 = t2.k1" - + " and t1.k1 = t2.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE")); - - // support recurse of bucket shuffle join - // TODO: support the UT in the future - queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2" - + " on t1.k1 = t2.k1 and t1.k1 = t2.k2 join test.colocate1 t3" - + " on t2.k1 = t3.k1 and t2.k2 = t3.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - // Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); - // Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t3`.`k1`, `t3`.`k2`")); - - // support recurse of bucket shuffle because t4 join t2 and join column name is same as t2 distribute column name - queryStr = "explain select * from test.jointest t1 join test.bucket_shuffle1 t2 on t1.k1 = t2.k1 and" - + " t1.k1 = t2.k2 join test.colocate1 t3 on t2.k1 = t3.k1 join test.jointest t4 on t4.k1 = t2.k1 and" - + " t4.k1 = t2.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - //Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); - //Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t4`.`k1`, `t4`.`k1`")); - - // some column name in join expr t3 join t4 and t1 distribute column name, so should not be bucket shuffle join - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.jointest t1 join test.bucket_shuffle1 t2 on t1.k1 = t2.k1 and t1.k1 =" - + " t2.k2 join test.colocate1 t3 on t2.k1 = t3.k1 join test.jointest t4 on t4.k1 = t3.k1 and" - + " t4.k2 = t3.k2"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t1`.`k1`, `t1`.`k1`")); - Assert.assertTrue(!explainString.contains("BUCKET_SHFFULE_HASH_PARTITIONED: `t4`.`k1`, `t4`.`k1`")); - - // here only a bucket shuffle + broadcast jost join - queryStr = "explain SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * FROM test.bucket_shuffle1 T LEFT JOIN test.bucket_shuffle1 T1 ON T1.k2 = T.k1 and T.k2 = T1.k3 LEFT JOIN" - + " test.bucket_shuffle2 T2 ON T2.k2 = T1.k1 and T2.k1 = T1.k2;"; - explainString = getSQLPlanOrErrorMsg(queryStr); - 
Assert.assertTrue(explainString.contains("BUCKET_SHFFULE")); - Assert.assertTrue(explainString.contains("BROADCAST")); - // disable bucket shuffle join again - Deencapsulation.setField(connectContext.getSessionVariable(), "enableBucketShuffleJoin", false); - } - - @Test - public void testJoinWithMysqlTable() throws Exception { - connectContext.setDatabase("test"); - - // set data size and row count for the olap table - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("test"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("jointest"); - for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(10000); - for (Tablet tablet : mIndex.getTablets()) { - for (Replica replica : tablet.getReplicas()) { - replica.updateVersion(2); - } - } - } - } - - // disable bucket shuffle join - Deencapsulation.setField(connectContext.getSessionVariable(), "enableBucketShuffleJoin", false); - - String queryStr = "explain select * from mysql_table t2, jointest t1 where t1.k1 = t2.k1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(BROADCAST)")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "SCAN MYSQL")); - - queryStr = "explain select * from jointest t1, mysql_table t2 where t1.k1 = t2.k1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(BROADCAST)")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "SCAN MYSQL")); - - queryStr = "explain select * from jointest t1, mysql_table t2, mysql_table t3 where t1.k1 = t3.k1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("INNER JOIN(PARTITIONED)")); - - // should clear the jointest table to make sure do not affect other test - for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(0); - for (Tablet tablet : mIndex.getTablets()) { - for (Replica replica : tablet.getReplicas()) { - replica.updateVersion(2); - } - } - } - } - } - - @Test - public void testJoinWithOdbcTable() throws Exception { - connectContext.setDatabase("test"); - - // set data size and row count for the olap table - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("test"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("jointest"); - for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(10000); - for (Tablet tablet : mIndex.getTablets()) { - for (Replica replica : tablet.getReplicas()) { - replica.updateVersion(2); - } - } - } - } - - // disable bucket shuffle join - Deencapsulation.setField(connectContext.getSessionVariable(), "enableBucketShuffleJoin", false); - String queryStr = "explain select * from odbc_mysql t2, jointest t1 where t1.k1 = t2.k1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(BROADCAST)")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "SCAN ODBC")); - - queryStr = "explain select * from jointest t1, odbc_mysql t2 where t1.k1 = t2.k1"; - explainString = 
getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(BROADCAST)")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "SCAN ODBC")); - - queryStr = "explain select * from jointest t1, odbc_mysql t2, odbc_mysql t3 where t1.k1 = t3.k1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("INNER JOIN(PARTITIONED)")); - - // should clear the jointest table to make sure do not affect other test - for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { - mIndex.setRowCount(0); - for (Tablet tablet : mIndex.getTablets()) { - for (Replica replica : tablet.getReplicas()) { - replica.updateVersion(2); - } - } - } - } - } - - @Disabled - public void testPushDownOfOdbcTable() throws Exception { - connectContext.setDatabase("test"); - - // MySQL ODBC table can push down all filter - String queryStr = "explain select * from odbc_mysql where k1 > 10 and abs(k1) > 10"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("`k1` > 10")); - Assert.assertTrue(explainString.contains("abs(`k1`) > 10")); - - // now we do not support odbc scan node push down function call, except MySQL ODBC table - // this table is Oracle ODBC table, so abs(k1) should not be pushed down - queryStr = "explain select * from odbc_oracle where k1 > 10 and abs(k1) > 10"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("\"K1\" > 10")); - Assert.assertTrue(!explainString.contains("abs(k1) > 10")); - } - - @Test - public void testLimitOfExternalTable() throws Exception { - connectContext.setDatabase("test"); - - // ODBC table (MySQL) - String queryStr = "explain select * from odbc_mysql where k1 > 10 and abs(k1) > 10 limit 10"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("LIMIT 10")); - - // ODBC table (Oracle) not push down limit - queryStr = "explain select * from odbc_oracle where k1 > 10 and abs(k1) > 10 limit 10"; - explainString = getSQLPlanOrErrorMsg(queryStr); - // abs is function, so Doris do not push down function except MySQL Database - // so should not push down limit operation - Assert.assertTrue(!explainString.contains("ROWNUM <= 10")); - - // ODBC table (Oracle) push down limit - queryStr = "explain select * from odbc_oracle where k1 > 10 limit 10"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("ROWNUM <= 10")); - - // MySQL table - queryStr = "explain select * from mysql_table where k1 > 10 and abs(k1) > 10 limit 10"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("LIMIT 10")); - } - - @Test - public void testOdbcSink() throws Exception { - connectContext.setDatabase("test"); - - // insert into odbc_oracle table - String queryStr = "explain insert into odbc_oracle select * from odbc_mysql"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("TABLENAME IN DORIS: odbc_oracle")); - Assert.assertTrue(explainString.contains("TABLE TYPE: ORACLE")); - Assert.assertTrue(explainString.contains("TABLENAME OF EXTERNAL TABLE: \"TBL1\"")); - - // enable transaction of ODBC Sink - Deencapsulation.setField(connectContext.getSessionVariable(), "enableOdbcTransaction", true); - queryStr = "explain insert into odbc_oracle select * 
from odbc_mysql"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("EnableTransaction: true")); - } - - @Test - public void testPreferBroadcastJoin() throws Exception { - connectContext.setDatabase("test"); - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from (select k2 from jointest)t2, jointest t1 where t1.k1 = t2.k2"; - // disable bucket shuffle join - Deencapsulation.setField(connectContext.getSessionVariable(), "enableBucketShuffleJoin", false); - - // default set PreferBroadcastJoin true - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(BROADCAST)")); - - connectContext.getSessionVariable().setPreferJoinMethod("shuffle"); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(PARTITIONED)")); - - connectContext.getSessionVariable().setPreferJoinMethod("broadcast"); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("INNER JOIN(BROADCAST)")); - } - - @Test - public void testRuntimeFilterMode() throws Exception { - connectContext.setDatabase("test"); - - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from jointest t2, jointest t1 where t1.k1 = t2.k1"; - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "LOCAL"); - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("runtime filter")); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "REMOTE"); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("runtime filter")); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "OFF"); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("runtime filter")); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "GLOBAL"); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("runtime filter")); - - queryStr = "explain select * from jointest t2, jointest t1 where t1.k1 <=> t2.k1"; - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "LOCAL"); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 15); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("runtime filter")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from jointest as a where k1 = (select count(1) from jointest as b" - + " where a.k1 = b.k1);"; - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "GLOBAL"); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 15); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("runtime filter")); - } - - @Test - public void testRuntimeFilterType() throws Exception { - connectContext.setDatabase("test"); - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from jointest t2, jointest t1 where t1.k1 = t2.k1"; - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "GLOBAL"); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 0); - String explainString = getSQLPlanOrErrorMsg(queryStr); - 
Assert.assertFalse(explainString.contains("runtime filter")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 1); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 2); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 3); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] <- `t1`.`k1`")); - - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 4); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[min_max] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 5); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 6); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 7); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[min_max] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 8); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in_or_bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 9); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[in_or_bloom] -> 
`t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 10); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[in_or_bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 11); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[in_or_bloom] -> `t2`.`k1`")); - - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 12); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[min_max] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[in_or_bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 13); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[in_or_bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 14); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[min_max] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[in_or_bloom] -> `t2`.`k1`")); - - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", 15); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[min_max] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF003[in_or_bloom] <- `t1`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF001[bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF002[min_max] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF003[in_or_bloom] -> `t2`.`k1`")); - - // support merge in filter, and forbidden implicit conversion to bloom filter - queryStr = 
"explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from jointest t2 join [shuffle] jointest t1 where t1.k1 = t2.k1"; - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "GLOBAL"); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", TRuntimeFilterType.IN.getValue()); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in] <- `t1`.`k1`")); - Assert.assertFalse(explainString.contains("RF000[bloom] -> `t2`.`k1`")); - Assert.assertFalse(explainString.contains("RF000[bloom] <- `t1`.`k1`")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from jointest t2 join [shuffle] jointest t1 where t1.k1 = t2.k1"; - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterMode", "GLOBAL"); - Deencapsulation.setField(connectContext.getSessionVariable(), "runtimeFilterType", TRuntimeFilterType.IN_OR_BLOOM.getValue()); - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("RF000[in_or_bloom] -> `t2`.`k1`")); - Assert.assertTrue(explainString.contains("RF000[in_or_bloom] <- `t1`.`k1`")); - } - - @Test - public void testEmptyNode() throws Exception { - connectContext.setDatabase("test"); - String emptyNode = "EMPTYSET"; - - List sqls = Lists.newArrayList(); - sqls.add("explain select * from baseall limit 0"); - sqls.add("explain select count(*) from baseall limit 0;"); - sqls.add("explain select k3, dense_rank() OVER () AS rank FROM baseall limit 0;"); - sqls.add("explain select rank from (select k3, dense_rank() OVER () AS rank FROM baseall) a limit 0;"); - sqls.add("explain select * from baseall join bigtable as b limit 0"); - - sqls.add("explain select * from baseall where 1 = 2"); - sqls.add("explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from baseall where null = null"); - sqls.add("explain select count(*) from baseall where 1 = 2;"); - sqls.add("explain select /*+ SET_VAR(enable_nereids_planner=false) */ k3, dense_rank() OVER () AS rank FROM baseall where 1 =2;"); - sqls.add("explain select rank from (select k3, dense_rank() OVER () AS rank FROM baseall) a where 1 =2;"); - sqls.add("explain select * from baseall join bigtable as b where 1 = 2"); - - for (String sql : sqls) { - String explainString = getSQLPlanOrErrorMsg(sql); - Assert.assertTrue(explainString.contains(emptyNode)); - } - } - - @Test - public void testInformationFunctions() throws Exception { - connectContext.setDatabase("test"); - Analyzer analyzer = new Analyzer(connectContext.getEnv(), connectContext); - InformationFunction infoFunc = new InformationFunction("database"); - infoFunc.analyze(analyzer); - Assert.assertEquals("test", infoFunc.getStrValue()); - - infoFunc = new InformationFunction("user"); - infoFunc.analyze(analyzer); - Assert.assertEquals("'root'@'127.0.0.1'", infoFunc.getStrValue()); - - infoFunc = new InformationFunction("current_user"); - infoFunc.analyze(analyzer); - Assert.assertEquals("'root'@'%'", infoFunc.getStrValue()); - } - - @Test - public void testAggregateSatisfyOlapTableDistribution() throws Exception { - FeConstants.runningUnitTest = true; - connectContext.setDatabase("test"); - String sql = "SELECT dt, dis_key, COUNT(1) FROM table_unpartitioned group by dt, dis_key"; - String explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("AGGREGATE (update finalize)")); - } - - 
- @Test - public void testLeadAndLagFunction() throws Exception { - connectContext.setDatabase("test"); - - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ time_col, lead(query_time, 1, NULL) over () as time2 from test.test1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("lead(`query_time`, 1, NULL)")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ time_col, lead(query_time, 1, 2) over () as time2 from test.test1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("lead(`query_time`, 1, 2)")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ time_col, lead(time_col, 1, '2020-01-01 00:00:00') over () as time2 from test.test1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("lead(`time_col`, 1, '2020-01-01 00:00:00')")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ time_col, lag(query_time, 1, 2) over () as time2 from test.test1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("lag(`query_time`, 1, 2)")); - } - - @Disabled - public void testIntDateTime() throws Exception { - connectContext.setDatabase("test"); - //valid date - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day in ('2020-10-30')"; - String explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion ? "PREDICATES: `day` IN ('2020-10-30')" - : "PREDICATES: `day` IN ('2020-10-30 00:00:00')")); - //valid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day in ('2020-10-30','2020-10-29')"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion - ? 
"PREDICATES: `day` IN ('2020-10-30', '2020-10-29')" - : "PREDICATES: `day` IN ('2020-10-30 00:00:00', '2020-10-29 00:00:00')")); - - //valid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date in ('2020-10-30 12:12:30')"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` IN ('2020-10-30 12:12:30')")); - //valid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date in ('2020-10-30')"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` IN ('2020-10-30 00:00:00')")); - - //int date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day in (20201030)"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `day` IN ('2020-10-30 00:00:00')")); - //int datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date in (20201030)"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` IN ('2020-10-30 00:00:00')")); - } - - @Test - public void testOutJoinSmapReplace() throws Exception { - connectContext.setDatabase("test"); - //valid date - String sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.aid, b.bid FROM (SELECT 3 AS aid) a right outer JOIN (SELECT 4 AS bid) b ON (a.aid=b.bid)"; - assertSQLPlanOrErrorMsgContains(sql, "OUTPUT EXPRS:\n" + " 3\n" + " 4"); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.aid, b.bid FROM (SELECT 3 AS aid) a left outer JOIN (SELECT 4 AS bid) b ON (a.aid=b.bid)"; - assertSQLPlanOrErrorMsgContains(sql, "OUTPUT EXPRS:\n" + " 3\n" + " 4"); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.aid, b.bid FROM (SELECT 3 AS aid) a full outer JOIN (SELECT 4 AS bid) b ON (a.aid=b.bid)"; - assertSQLPlanOrErrorMsgContains(sql, "OUTPUT EXPRS:\n" + " 3\n" + " 4"); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.aid, b.bid FROM (SELECT 3 AS aid) a JOIN (SELECT 4 AS bid) b ON (a.aid=b.bid)"; - assertSQLPlanOrErrorMsgContains(sql, "OUTPUT EXPRS:\n" + " 3\n" + " 4"); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.k1, b.k2 FROM (SELECT k1 from baseall) a LEFT OUTER JOIN (select k1, 999 as k2 from baseall) b ON (a.k1=b.k1)"; - assertSQLPlanOrErrorMsgContains(sql, " `k1`\n" + " 999"); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.k1, b.k2 FROM (SELECT 1 as k1 from baseall) a RIGHT OUTER JOIN (select k1, 999 as k2 from baseall) b ON (a.k1=b.k1)"; - assertSQLPlanOrErrorMsgContains(sql, " 1\n" + " 999"); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.k1, b.k2 FROM (SELECT 1 as k1 from baseall) a FULL JOIN (select k1, 999 as k2 from baseall) b ON (a.k1=b.k1)"; - assertSQLPlanOrErrorMsgContains(sql, " 1\n" + " 999"); - } - - @Test - public void testFromUnixTimeRewrite() throws Exception { - connectContext.setDatabase("test"); - //default format - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where from_unixtime(query_time) > '2021-03-02 10:01:28'"; - String explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("(`query_time` <= 253402271999) AND (`query_time` > 1614650488)")); - } - - @Disabled - public void testCheckInvalidDate() throws Exception { - 
FeConstants.runningUnitTest = true; - connectContext.setDatabase("test"); - //valid date - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = '2020-10-30'"; - String explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion ? "PREDICATES: `day` = '2020-10-30'" - : "PREDICATES: `day` = '2020-10-30 00:00:00'")); - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = from_unixtime(1196440219)"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `day` = '2007-12-01 00:30:19'")); - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = str_to_date('2014-12-21 12:34:56', '%Y-%m-%d %H:%i:%s');"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `day` = '2014-12-21 12:34:56'")); - //valid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = 20201030"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion ? "PREDICATES: `day` = '2020-10-30'" - : "PREDICATES: `day` = '2020-10-30 00:00:00'")); - //valid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = '20201030'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion ? "PREDICATES: `day` = '2020-10-30'" - : "PREDICATES: `day` = '2020-10-30 00:00:00'")); - //valid date contains micro second - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = '2020-10-30 10:00:01.111111'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion - ? 
"VEMPTYSET" - : "PREDICATES: `day` = '2020-10-30 10:00:01'")); - //invalid date - - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = '2020-10-32'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: '2020-10-32' in expression: `day` = '2020-10-32'")); - - //invalid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = '20201032'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: '20201032' in expression: `day` = '20201032'")); - //invalid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = 20201032"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: 20201032 in expression: `day` = 20201032")); - //invalid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = 'hello'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: 'hello' in expression: `day` = 'hello'")); - //invalid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = 2020-10-30"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: 1980 in expression: `day` = 1980")); - //invalid date - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where day = 10-30"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: -20 in expression: `day` = -20")); - //valid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-30 12:12:30'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2020-10-30 12:12:30'")); - //valid datetime, support parsing to minute - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-30 12:12'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2020-10-30 12:12:00'")); - //valid datetime, support parsing to hour - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-30 12'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2020-10-30 12:00:00'")); - //valid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = 20201030"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2020-10-30 00:00:00'")); - //valid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '20201030'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2020-10-30 00:00:00'")); - //valid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-30'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = 
'2020-10-30 00:00:00'")); - //valid datetime contains micro second - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-30 10:00:01.111111'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains(Config.enable_date_conversion - ? "VEMPTYSET" : "PREDICATES: `date` = '2020-10-30 10:00:01'")); - //invalid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-32'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: '2020-10-32' in expression: `date` = '2020-10-32'")); - //invalid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = 'hello'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: 'hello' in expression: `date` = 'hello'")); - //invalid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = 2020-10-30"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: 1980 in expression: `date` = 1980")); - //invalid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = 10-30"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: -20 in expression: `date` = -20")); - //invalid datetime - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '2020-10-12 12:23:76'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("Incorrect datetime value: '2020-10-12 12:23:76' in expression: `date` = '2020-10-12 12:23:76'")); - - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '1604031150'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2016-04-03 11:50:00'")); - - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ day from tbl_int_date where date = '1604031150000'"; - explainString = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `date` = '2016-04-03 11:50:00'")); - - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ count(*) from test.baseall where k11 > to_date(now())"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("PREDICATES: `k11` > to_date")); - - queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ count(*) from test.baseall where k11 > '2021-6-1'"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assert.assertTrue(explainString.contains("PREDICATES: `k11` > '2021-06-01 00:00:00'")); - } - - @Test - public void testCompoundPredicateWriteRule() throws Exception { - connectContext.setDatabase("test"); - - // false or e ==> e - String sql1 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where 2=-2 OR query_time=0;"; - String explainString1 = getSQLPlanOrErrorMsg("EXPLAIN " + sql1); - Assert.assertTrue(explainString1.contains("`query_time` = 0")); - - //true or e ==> true - String sql2 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where -5=-5 OR query_time=0;"; - String explainString2 = 
getSQLPlanOrErrorMsg("EXPLAIN " + sql2); - Assert.assertTrue(!explainString2.contains("OR")); - - //e or true ==> true - String sql3 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where query_time=0 OR -5=-5;"; - String explainString3 = getSQLPlanOrErrorMsg("EXPLAIN " + sql3); - Assert.assertTrue(!explainString3.contains("OR")); - - //e or false ==> e - String sql4 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where -5!=-5 OR query_time=0;"; - String explainString4 = getSQLPlanOrErrorMsg("EXPLAIN " + sql4); - Assert.assertTrue(explainString4.contains("`query_time` = 0")); - - - // true and e ==> e - String sql5 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where -5=-5 AND query_time=0;"; - String explainString5 = getSQLPlanOrErrorMsg("EXPLAIN " + sql5); - Assert.assertTrue(explainString5.contains("`query_time` = 0")); - - // e and true ==> e - String sql6 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where query_time=0 AND -5=-5;"; - String explainString6 = getSQLPlanOrErrorMsg("EXPLAIN " + sql6); - Assert.assertTrue(explainString6.contains("`query_time` = 0")); - - // false and e ==> false - String sql7 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where -5!=-5 AND query_time=0;"; - String explainString7 = getSQLPlanOrErrorMsg("EXPLAIN " + sql7); - Assert.assertTrue(!explainString7.contains("FALSE")); - - // e and false ==> false - String sql8 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where query_time=0 AND -5!=-5;"; - String explainString8 = getSQLPlanOrErrorMsg("EXPLAIN " + sql8); - Assert.assertTrue(!explainString8.contains("FALSE")); - - // (false or expr1) and (false or expr2) ==> expr1 and expr2 - String sql9 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where (-2=2 or query_time=2) and (-2=2 or stmt_id=2);"; - String explainString9 = getSQLPlanOrErrorMsg("EXPLAIN " + sql9); - Assert.assertTrue(explainString9.contains("(`query_time` = 2) AND (`stmt_id` = 2)")); - - // false or (expr and true) ==> expr - String sql10 = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.test1 where (2=-2) OR (query_time=0 AND 1=1);"; - String explainString10 = getSQLPlanOrErrorMsg("EXPLAIN " + sql10); - Assert.assertTrue(explainString10.contains("`query_time` = 0")); - } - - @Test - public void testOutfile() throws Exception { - connectContext.setDatabase("test"); - Config.enable_outfile_to_local = true; - createTable("CREATE TABLE test.`outfile1` (\n" - + " `date` date NOT NULL,\n" - + " `road_code` int(11) NOT NULL DEFAULT \"-1\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`date`, `road_code`)\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE(`date`)\n" - + "(PARTITION v2x_ads_lamp_source_percent_statistic_20210929 VALUES [('2021-09-29'), ('2021-09-30')))\n" - + "DISTRIBUTED BY HASH(`road_code`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");"); - - // test after query rewrite, outfile still work - String sql = "select * from test.outfile1 where `date` between '2021-10-07' and '2021-10-11'" - + "INTO OUTFILE \"file:///tmp/1_\" FORMAT AS CSV PROPERTIES (" - + " \"column_separator\" = \",\"," - + " \"line_delimiter\" = \"\\n\"," - + " \"max_file_size\" = \"500MB\" );"; - String explainStr = getSQLPlanOrErrorMsg("EXPLAIN " + sql); - if (Config.enable_date_conversion) { - Assert.assertTrue(explainStr.contains("(`date` >= '2021-10-07') AND" - + " (`date` <= 
'2021-10-11')")); - } else { - Assert.assertTrue(explainStr.contains("(`date` >= '2021-10-07 00:00:00') AND" - + " (`date` <= '2021-10-11 00:00:00')")); - } - } - - // Fix: issue-#7929 - @Test - public void testEmptyNodeWithOuterJoinAndAnalyticFunction() throws Exception { - // create database - String createDbStmtStr = "create database issue7929;"; - CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbStmtStr); - Env.getCurrentEnv().createDb(createDbStmt); - createTable(" CREATE TABLE issue7929.`t1` (\n" - + " `k1` int(11) NULL COMMENT \"\",\n" - + " `k2` int(11) NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`k1`, `k2`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"); - createTable("CREATE TABLE issue7929.`t2` (\n" - + " `j1` int(11) NULL COMMENT \"\",\n" - + " `j2` int(11) NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`j1`, `j2`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`j1`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"); - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from issue7929.t1 left join (select max(j1) over() as x from issue7929.t2) a" - + " on t1.k1 = a.x where 1 = 0;"; - String explainStr = getSQLPlanOrErrorMsg(sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainStr, 5, "EMPTYSET")); - Assert.assertTrue(explainStr.contains("tuple ids: 5")); - } - - @Ignore - // Open it after fixing issue #7971 - public void testGroupingSetOutOfBoundError() throws Exception { - String createDbStmtStr = "create database issue1111;"; - CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbStmtStr); - Env.getCurrentEnv().createDb(createDbStmt); - createTable("CREATE TABLE issue1111.`test1` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\"\n" - + ");"); - String sql = "SELECT k1 ,GROUPING(k2) FROM issue1111.test1 GROUP BY CUBE (k1) ORDER BY k1"; - String explainStr = getSQLPlanOrErrorMsg(sql, true); - System.out.println(explainStr); - } - - // --begin-- implicit cast in explain verbose - @Disabled - public void testExplainInsertInto() throws Exception { - ExplainTest explainTest = new ExplainTest(); - explainTest.before(connectContext); - explainTest.testExplainInsertInto(); - explainTest.testExplainVerboseInsertInto(); - explainTest.testExplainSelect(); - explainTest.testExplainVerboseSelect(); - explainTest.testExplainConcatSelect(); - explainTest.testExplainVerboseConcatSelect(); - explainTest.after(); - } - // --end-- - - // --begin-- rewrite date literal rule - @Disabled - public void testRewriteDateLiteralRule() throws Exception { - RewriteDateLiteralRuleTest rewriteDateLiteralRuleTest = new RewriteDateLiteralRuleTest(); - rewriteDateLiteralRuleTest.before(connectContext); - rewriteDateLiteralRuleTest.testWithDoubleFormatDate(); - rewriteDateLiteralRuleTest.testWithIntFormatDate(); - rewriteDateLiteralRuleTest.testWithInvalidFormatDate(); - rewriteDateLiteralRuleTest.testWithStringFormatDate(); - 
rewriteDateLiteralRuleTest.testWithDoubleFormatDateV2(); - rewriteDateLiteralRuleTest.testWithIntFormatDateV2(); - rewriteDateLiteralRuleTest.testWithInvalidFormatDateV2(); - rewriteDateLiteralRuleTest.testWithStringFormatDateV2(); - rewriteDateLiteralRuleTest.after(); - } - // --end-- - - @Test - public void testGroupingSets() throws Exception { - String createDbStmtStr = "create database issue7971;"; - CreateDbStmt createDbStmt = (CreateDbStmt) parseAndAnalyzeStmt(createDbStmtStr); - Env.getCurrentEnv().createDb(createDbStmt); - createTable("CREATE TABLE issue7971.`t` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\",\n" - + " `k3` smallint(6) NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`k1`, `k2`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"); - createTable("CREATE TABLE issue7971.`t1` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k21` smallint(6) NULL COMMENT \"\",\n" - + " `k31` smallint(6) NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`k1`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ")"); - String sql = "SELECT k1, k2, GROUPING(k1), GROUPING(k2), SUM(k3) FROM issue7971.t GROUP BY GROUPING SETS ( (k1, k2), (k2), (k1), ( ) );"; - String explainStr = getSQLPlanOrErrorMsg(sql); - Assert.assertTrue(explainStr.contains("REPEAT_NODE")); - sql = "SELECT k1 ,GROUPING(k2) FROM issue7971.t GROUP BY CUBE (k1) ORDER BY k1;"; - explainStr = getSQLPlanOrErrorMsg(sql); - Assert.assertTrue(explainStr.contains("errCode = 2")); - sql = "select grouping_id(t1.k1), t1.k1, max(k2) from issue7971.t left join issue7971.t1 on t.k3 = t1.k1 group by grouping sets ((k1), ());"; - explainStr = getSQLPlanOrErrorMsg(sql); - Assert.assertTrue(explainStr.contains("REPEAT_NODE")); - } - - @Test - public void testQueryWithUsingClause() throws Exception { - connectContext.setDatabase("test"); - String iSql1 = "explain insert into test.tbl_using_a values(1,3,7),(2,2,8),(3,1,9)"; - String iSql2 = "explain insert into test.tbl_using_b values(1,3,1),(3,1,1),(4,1,1),(5,2,1)"; - getSQLPlanOrErrorMsg(iSql1); - getSQLPlanOrErrorMsg(iSql2); - String qSQL = "explain select t1.* from test.tbl_using_a t1 join test.tbl_using_b t2 using(k1,k2) where t1.k1 " - + "between 1 and 3 and t2.k3 between 1+0 and 3+0"; - try { - getSQLPlanOrErrorMsg(qSQL); - } catch (AnalysisException e) { - Assert.fail(); - } - } - - @Test - public void testResultExprs() throws Exception { - connectContext.setDatabase("test"); - createTable("CREATE TABLE test.result_exprs (\n" - + " `aid` int(11) NULL,\n" - + " `bid` int(11) NULL\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`aid`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`aid`) BUCKETS 7\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_medium\" = \"HDD\",\n" - + "\"storage_format\" = \"V2\"\n" - + ");\n"); - String queryStr = "EXPLAIN VERBOSE INSERT INTO result_exprs\n" + "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ a.aid,\n" + " b.bid\n" + "FROM\n" - + " (SELECT 3 AS aid)a\n" + "RIGHT JOIN\n" + " (SELECT 4 AS bid)b ON (a.aid=b.bid)\n"; - String explainString = 
getSQLPlanOrErrorMsg(queryStr); - Assert.assertFalse(explainString.contains("OUTPUT EXPRS:\n 3\n 4")); - System.out.println(explainString); - Assert.assertTrue(explainString.contains( - "OUTPUT EXPRS:\n" + " CAST( 3 AS int)\n" + " CAST( 4 AS int)")); - } - - @Test - public void testInsertIntoSelect() throws Exception { - connectContext.setDatabase("test"); - createTable("CREATE TABLE test.`decimal_tb` (\n" - + " `k1` decimal(1, 0) NULL COMMENT \"\",\n" - + " `v1` decimal(1, 0) SUM NULL COMMENT \"\",\n" - + " `v2` decimal(1, 0) MAX NULL COMMENT \"\",\n" - + " `v3` decimal(1, 0) MIN NULL COMMENT \"\",\n" - + " `v4` decimal(1, 0) REPLACE NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`k1`)\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\"\n" - + ")"); - String sql = "explain insert into test.decimal_tb select 1, 1, 1, 1, 1;"; - String explainString = getSQLPlanOrErrorMsg(sql); - Assert.assertTrue(explainString.contains("1 | 1 | 1 | 1 | 1")); - } - - @Test - public void testOutJoinWithOnFalse() throws Exception { - connectContext.setDatabase("test"); - createTable("create table out_join_1\n" - + "(\n" - + " k1 int,\n" - + " v int\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" - + "PROPERTIES(\"replication_num\" = \"1\");"); - - createTable("create table out_join_2\n" - + "(\n" - + " k1 int,\n" - + " v int\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" - + "PROPERTIES(\"replication_num\" = \"1\");"); - - String sql = "explain select * from out_join_1 left join out_join_2 on out_join_1.k1 = out_join_2.k1 and 1=2;"; - String explainString = getSQLPlanOrErrorMsg(sql); - Assert.assertFalse(explainString.contains("non-equal LEFT OUTER JOIN is not supported")); - - sql = "explain select * from out_join_1 right join out_join_2 on out_join_1.k1 = out_join_2.k1 and 1=2;"; - explainString = getSQLPlanOrErrorMsg(sql); - Assert.assertFalse(explainString.contains("non-equal RIGHT OUTER JOIN is not supported")); - - sql = "explain select * from out_join_1 full join out_join_2 on out_join_1.k1 = out_join_2.k1 and 1=2;"; - explainString = getSQLPlanOrErrorMsg(sql); - Assert.assertFalse(explainString.contains("non-equal FULL OUTER JOIN is not supported")); - - } - - @Test - public void testDefaultJoinReorder() throws Exception { - connectContext.setDatabase("test"); - createTable("CREATE TABLE t1 (col1 varchar, col2 varchar, col3 int)\n" + "DISTRIBUTED BY HASH(col3)\n" - + "BUCKETS 3\n" + "PROPERTIES(\n" + " \"replication_num\"=\"1\"\n" + ");"); - createTable("CREATE TABLE t2 (col1 varchar, col2 varchar, col3 int)\n" + "DISTRIBUTED BY HASH(col3)\n" - + "BUCKETS 3\n" + "PROPERTIES(\n" + " \"replication_num\"=\"1\"\n" + ");"); - createTable("CREATE TABLE t3 (col1 varchar, col2 varchar, col3 int)\n" + "DISTRIBUTED BY HASH(col3)\n" - + "BUCKETS 3\n" + "PROPERTIES(\n" + " \"replication_num\"=\"1\"\n" + ");"); - String sql = "explain select x.col2 from t1,t2,t3 x,t3 y " - + "where x.col1=t2.col1 and y.col1=t2.col2 and t1.col1=y.col1"; - String explainString = getSQLPlanOrErrorMsg(sql); - Assert.assertFalse(explainString.contains("CROSS JOIN")); - - } - - @Test - public void testDefaultJoinReorderWithView() throws Exception { - connectContext.setDatabase("test"); - createTable("CREATE TABLE t_1 (col1 varchar, col2 varchar, col3 int)\n" + "DISTRIBUTED BY HASH(col3)\n" - + "BUCKETS 3\n" + "PROPERTIES(\n" + " \"replication_num\"=\"1\"\n" + ");"); - createTable("CREATE TABLE t_2 (col1 varchar, col2 varchar, 
col3 int)\n" + "DISTRIBUTED BY HASH(col3)\n" - + "BUCKETS 3\n" + "PROPERTIES(\n" + " \"replication_num\"=\"1\"\n" + ");"); - createView("CREATE VIEW v_1 as select col1 from t_1;"); - createView("CREATE VIEW v_2 as select x.col2 from (select t_2.col2, 1 + 1 from t_2) x;"); - - String sql = "explain select t_1.col2, v_1.col1 from t_1 inner join t_2 on t_1.col1 = t_2.col1 inner join v_1 " - + "on v_1.col1 = t_2.col2 inner join v_2 on v_2.col2 = t_2.col1"; - String explainString = getSQLPlanOrErrorMsg(sql); - System.out.println(explainString); - // errCode = 2, detailMessage = Unknown column 'col2' in 't_2' - Assert.assertFalse(explainString.contains("errCode")); - } - - @Test - public void testKeyOrderError() throws Exception { - Assertions.assertTrue(getSQLPlanOrErrorMsg("CREATE TABLE `test`.`test_key_order` (\n" - + " `k1` tinyint(4) NULL COMMENT \"\",\n" - + " `k2` smallint(6) NULL COMMENT \"\",\n" - + " `k3` int(11) NULL COMMENT \"\",\n" - + " `v1` double MAX NULL COMMENT \"\",\n" - + " `v2` float SUM NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`k1`, `k3`, `k2`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");").contains("Key columns should be a ordered prefix of the schema. " - + "KeyColumns[1] (starts from zero) is k3, " - + "but corresponding column is k2 in the previous columns declaration.")); - } - - @Test - public void testPreaggregationOfOrthogonalBitmapUDAF() throws Exception { - connectContext.setDatabase("test"); - createTable("CREATE TABLE test.bitmap_tb (\n" - + " `id` int(11) NULL COMMENT \"\",\n" - + " `id2` int(11) NULL COMMENT \"\",\n" - + " `id3` bitmap bitmap_union \n" - + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`id`,`id2`)\n" - + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" - + "PROPERTIES (\n" - + " \"replication_num\" = \"1\"\n" - + ");"); - - String queryBaseTableStr = "explain select id,id2,orthogonal_bitmap_union_count(id3) from test.bitmap_tb t1 group by id,id2"; - String explainString1 = getSQLPlanOrErrorMsg(queryBaseTableStr); - Assert.assertTrue(explainString1.contains("PREAGGREGATION: ON")); - - String queryTableStr = "explain select id,orthogonal_bitmap_union_count(id3) from test.bitmap_tb t1 group by id"; - String explainString2 = getSQLPlanOrErrorMsg(queryTableStr); - Assert.assertTrue(explainString2.contains("PREAGGREGATION: ON")); - } - - @Test - public void testPreaggregationOfHllUnion() throws Exception { - connectContext.setDatabase("test"); - createTable("create table test.test_hll(\n" - + " dt date,\n" - + " id int,\n" - + " name char(10),\n" - + " province char(10),\n" - + " os char(10),\n" - + " pv hll hll_union\n" - + ")\n" - + "Aggregate KEY (dt,id,name,province,os)\n" - + "distributed by hash(id) buckets 10\n" - + "PROPERTIES(\n" - + " \"replication_num\" = \"1\",\n" - + " \"in_memory\"=\"false\"\n" - + ");"); - - String queryBaseTableStr = "explain select dt, hll_union(pv) from test.test_hll group by dt"; - String explainString = getSQLPlanOrErrorMsg(queryBaseTableStr); - Assert.assertTrue(explainString.contains("PREAGGREGATION: ON")); - } - - /* - NOTE: - explainString.contains("PREDICATES: xxx\n") - add '\n' at the end of line to ensure there are no other predicates - */ - @Test - public void testRewriteOrToIn() throws Exception { - connectContext.setDatabase("test"); - connectContext.getSessionVariable().setEnableRewriteElementAtToSlot(false); - String sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where query_time = 1 
or query_time = 2 or query_time in (3, 4)"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `query_time` IN (1, 2, 3, 4)\n")); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2) and query_time in (3, 4)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: (`query_time` IN (1, 2) AND `query_time` IN (3, 4))\n")); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2 or scan_bytes = 2) and scan_bytes in (2, 3)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: ((`query_time` IN (1, 2) OR (`scan_bytes` = 2)) AND `scan_bytes` IN (2, 3))\n")); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2) and (scan_bytes = 2 or scan_bytes = 3)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: (`query_time` IN (1, 2) AND `scan_bytes` IN (2, 3))\n")); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where query_time = 1 or query_time = 2 or query_time = 3 or query_time = 1"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `query_time` IN (1, 2, 3)\n")); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where query_time = 1 or query_time = 2 or query_time in (3, 2)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `query_time` IN (1, 2, 3)\n")); - - connectContext.getSessionVariable().setRewriteOrToInPredicateThreshold(100); - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where query_time = 1 or query_time = 2 or query_time in (3, 4)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: (((`query_time` = 1) OR (`query_time` = 2)) OR `query_time` IN (3, 4))\n")); - connectContext.getSessionVariable().setRewriteOrToInPredicateThreshold(2); - - sql = "SELECT /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2) and query_time in (3, 4)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: (`query_time` IN (1, 2) AND `query_time` IN (3, 4))\n")); - - //test we can handle `!=` and `not in` - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2 or query_time!= 3 or query_time not in (5, 6))"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: (`query_time` IN (1, 2) OR ((`query_time` != 3) OR `query_time` NOT IN (5, 6)))\n")); - - //test we can handle merge 2 or more columns - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2 or scan_rows = 3 or scan_rows = 4)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - 
Assert.assertTrue(explainString.contains("PREDICATES: (`query_time` IN (1, 2) OR `scan_rows` IN (3, 4))")); - - //merge in-pred or in-pred - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (query_time = 1 or query_time = 2 or query_time = 3 or query_time = 4)"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains("PREDICATES: `query_time` IN (1, 2, 3, 4)\n")); - - //rewrite recursively - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 " - + "where query_id=client_ip " - + " and (stmt_id=1 or stmt_id=2 or stmt_id=3 " - + " or (user='abc' and (state = 'a' or state='b' or state in ('c', 'd'))))" - + " or (db not in ('x', 'y')) "; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains( - "PREDICATES: (((`query_id` = `client_ip`) AND (`stmt_id` IN (1, 2, 3) OR ((`user` = 'abc') " - + "AND `state` IN ('a', 'b', 'c', 'd')))) OR (`db` NOT IN ('x', 'y')))\n")); - - //ExtractCommonFactorsRule may generate more expr, test the rewriteOrToIn applied on generated exprs - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from test1 where (stmt_id=1 and state='a') or (stmt_id=2 and state='b')"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, "EXPLAIN " + sql); - Assert.assertTrue(explainString.contains( - "PREDICATES: ((`state` IN ('a', 'b') AND `stmt_id` IN (1, 2)) AND (((`stmt_id` = 1) AND " - + "(`state` = 'a')) OR ((`stmt_id` = 2) AND (`state` = 'b'))))\n" - )); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/RepeatNodeTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/RepeatNodeTest.java deleted file mode 100644 index 9a9b1a3b8c5fec..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/RepeatNodeTest.java +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.planner; - -import org.apache.doris.utframe.TestWithFeService; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class RepeatNodeTest extends TestWithFeService { - - @Override - protected void runBeforeAll() throws Exception { - createDatabase("testdb"); - useDatabase("testdb"); - createTable(" CREATE TABLE `testdb`.`mycost` (\n" + " `id` tinyint(4) NULL,\n" + " `name` varchar(20) NULL,\n" - + " `date` date NULL,\n" + " `cost` bigint(20) SUM NULL\n" + ") ENGINE=OLAP\n" - + "AGGREGATE KEY(`id`, `name`, `date`)\n" + "COMMENT 'OLAP'\n" + "PARTITION BY RANGE(`date`)\n" - + "(PARTITION p2020 VALUES [('0000-01-01'), ('2021-01-01')),\n" - + "PARTITION p2021 VALUES [('2021-01-01'), ('2022-01-01')),\n" - + "PARTITION p2022 VALUES [('2022-01-01'), ('2023-01-01')))\n" + "DISTRIBUTED BY HASH(`id`) BUCKETS 8\n" - + "PROPERTIES (\n" + "\"replication_allocation\" = \"tag.location.default: 1\",\n" - + "\"in_memory\" = \"false\",\n" + "\"storage_format\" = \"V2\"\n" + ");"); - - createTable( - " CREATE TABLE `testdb`.`mypeople` (\n" + " `id` bigint(20) NULL,\n" + " `name` varchar(20) NULL,\n" - + " `sex` varchar(10) NULL,\n" + " `age` int(11) NULL,\n" + " `phone` char(15) NULL,\n" - + " `address` varchar(50) NULL\n" + ") ENGINE=OLAP\n" + "DUPLICATE KEY(`id`, `name`)\n" - + "COMMENT 'OLAP'\n" + "DISTRIBUTED BY HASH(`id`) BUCKETS 8\n" + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" + ");"); - } - - @Test - public void testNormal() throws Exception { - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ id, name, sum(cost), grouping_id(id, name) from mycost group by cube(id, name);"; - String explainString = getSQLPlanOrErrorMsg("explain " + sql); - Assertions.assertTrue(explainString.contains("exprs: `id`, `name`, `cost`")); - Assertions.assertTrue(explainString.contains( - "output slots: ``id``, ``name``, ``cost``, ``GROUPING_ID``, ``GROUPING_PREFIX_`id`_`name```")); - } - - @Test - public void testExpr() throws Exception { - String sql1 = "select /*+ SET_VAR(enable_nereids_planner=false) */ if(c.id > 0, 1, 0) as id_, p.name, sum(c.cost) from mycost c " - + "join mypeople p on c.id = p.id group by grouping sets((id_, name),());"; - String explainString1 = getSQLPlanOrErrorMsg("explain " + sql1); - System.out.println(explainString1); - Assertions.assertTrue(explainString1.contains( - "output slots: `if((`c`.`id` > 0), 1, 0)`, ``p`.`name``, ``c`.`cost``, ``GROUPING_ID``")); - - String sql2 = "select /*+ SET_VAR(enable_nereids_planner=false) */ (id + 1) id_, name, sum(cost) from mycost group by grouping sets((id_, name),());"; - String explainString2 = getSQLPlanOrErrorMsg("explain " + sql2); - System.out.println(explainString2); - Assertions.assertTrue(explainString2.contains("exprs: ((`id` + 1)), `name`, `cost`")); - Assertions.assertTrue( - explainString2.contains(" output slots: `((`id` + 1))`, ``name``, ``cost``, ``GROUPING_ID``")); - - String sql3 = "select /*+ SET_VAR(enable_nereids_planner=false) */ 1 as id_, name, sum(cost) from mycost group by grouping sets((id_, name),());"; - String explainString3 = getSQLPlanOrErrorMsg("explain " + sql3); - System.out.println(explainString3); - Assertions.assertTrue(explainString3.contains("exprs: 1, `name`, `cost`")); - Assertions.assertTrue(explainString3.contains("output slots: `1`, ``name``, ``cost``, ``GROUPING_ID``")); - } -} diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java index 207bddae1b30a6..850d8b27b062af 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java @@ -280,7 +280,7 @@ public void test() throws Exception { Assert.assertEquals(1000000, execMemLimit); List> userProps = Env.getCurrentEnv().getAuth().getUserProperties(Auth.ROOT_USER); - Assert.assertEquals(12, userProps.size()); + Assert.assertEquals(13, userProps.size()); // now : // be1 be2 be3 ==>tag1; diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java deleted file mode 100644 index 98f36842795341..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/TableFunctionPlanTest.java +++ /dev/null @@ -1,554 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.planner; - -import org.apache.doris.analysis.CreateDbStmt; -import org.apache.doris.analysis.CreateTableStmt; -import org.apache.doris.analysis.CreateViewStmt; -import org.apache.doris.catalog.Env; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.utframe.UtFrameUtils; - -import org.apache.commons.io.FileUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.util.UUID; - -public class TableFunctionPlanTest { - private static final String runningDir = "fe/mocked/TableFunctionPlanTest/" + UUID.randomUUID() + "/"; - private static ConnectContext ctx; - - @After - public void tearDown() throws Exception { - FileUtils.deleteDirectory(new File(runningDir)); - } - - @BeforeClass - public static void setUp() throws Exception { - UtFrameUtils.createDorisCluster(runningDir); - ctx = UtFrameUtils.createDefaultCtx(); - String createDbStmtStr = "create database db1;"; - CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, ctx); - Env.getCurrentEnv().createDb(createDbStmt); - // 3. 
create table tbl1 - String createTblStmtStr = "create table db1.tbl1(k1 int, k2 varchar(1), k3 varchar(1)) " - + "DUPLICATE KEY(k1) distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); - Env.getCurrentEnv().createTable(createTableStmt); - - createTblStmtStr = "create table db1.tbl2(k1 int, k2 varchar(1), v1 bitmap bitmap_union) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); - Env.getCurrentEnv().createTable(createTableStmt); - - createTblStmtStr = "create table db1.table_for_view (k1 int, k2 int, k3 varchar(100)) distributed by hash(k1)" - + "properties('replication_num' = '1');"; - createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr, ctx); - Env.getCurrentEnv().createTable(createTableStmt); - } - - // test planner - /* Case1 normal table function - select k1, e1 from table lateral view explode_split(k2, ",") tmp as e1; - */ - @Test - public void normalTableFunction() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k2, \",\") tmp as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("tuple ids: 0 1")); - Assert.assertTrue(explainString.contains("TupleDescriptor{id=1, tbl=tmp")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1, colUniqueId=-1, type=varchar")); - } - - /* Case2 without output explode column - select k1 from table lateral view explode_split(k2, ",") tmp as e1; - */ - @Test - public void withoutOutputExplodeColumn() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 lateral view explode_split(k2, \",\") tmp as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("OUTPUT EXPRS:\n `k1`")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("tuple ids: 0 1")); - Assert.assertTrue(explainString.contains("TupleDescriptor{id=1, tbl=tmp")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1, colUniqueId=-1, type=varchar")); - } - - /* Case3 group by explode column - select k1, e1, count(*) from table lateral view explode_split(k2, ",") tmp as e1 group by k1 e1; - */ - @Test - public void groupByExplodeColumn() throws Exception { - String sql = - "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1, count(*) from db1.tbl1 lateral view explode_split(k2, \",\") tmp as e1 " - + "group by k1, e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - // group by node with k1, e1 - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 2, "AGGREGATE (update finalize)")); - Assert.assertTrue(explainString.contains("group by: `k1`, `e1`")); - // table function node - 
Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("tuple ids: 0 1")); - Assert.assertTrue(explainString.contains("TupleDescriptor{id=1, tbl=tmp")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1, colUniqueId=-1, type=varchar")); - // group by tuple - Assert.assertTrue(explainString.contains("TupleDescriptor{id=2, tbl=null")); - } - - /* Case4 where explode column - select k1, e1 from table lateral view explode_split(k2, ",") tmp as e1 where e1 = "1"; - */ - @Test - public void whereExplodeColumn() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k2, \",\") tmp as e1 " - + "where e1='1'; "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("`e1` = '1'")); - Assert.assertTrue(explainString.contains("tuple ids: 0 1")); - Assert.assertTrue(explainString.contains("TupleDescriptor{id=1, tbl=tmp")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1, colUniqueId=-1, type=varchar")); - } - - /* Case5 where normal column - select k1, e1 from table lateral view explode_split(k2, ",") tmp as e1 where k1 = 1; - */ - @Test - public void whereNormalColumn() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k2, \",\") tmp as e1 " - + "where k1=1; "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("tuple ids: 0 1")); - Assert.assertTrue(explainString.contains("TupleDescriptor{id=1, tbl=tmp")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e1, colUniqueId=-1, type=varchar")); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 0, "OlapScanNode")); - Assert.assertTrue(explainString.contains("`k1` = 1")); - } - - /* Case6 multi lateral view - select k1, e1, e2 from table lateral view explode_split(k2, ",") tmp1 as e1 - lateral view explode_split(k2, ",") tmp2 as e2; - */ - @Test - public void testMultiLateralView() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1, e2 from db1.tbl1 lateral view explode_split(k2, \",\") tmp1 as e1" - + " lateral view explode_split(k2, \",\") tmp2 as e2;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains( - "table function: explode_split(`db1`.`tbl1`.`k2`, ',') explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 1 2")); - // lateral view 2 tuple - Assert.assertTrue(explainString.contains("TupleDescriptor{id=1, tbl=tmp2")); - 
Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=e2, colUniqueId=-1, type=varchar")); - // lateral view 1 tuple - Assert.assertTrue(explainString.contains("TupleDescriptor{id=2, tbl=tmp1")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=2, col=e1, colUniqueId=-1, type=varchar")); - } - - // test explode_split function - // k1 int ,k2 string - /* Case1 error param - select k1, e1 from table lateral view explode_split(k2) tmp as e1; - select k1, e1 from table lateral view explode_split(k1) tmp as e1; - select k1, e1 from table lateral view explode_split(k2, k2) tmp as e1; - */ - @Test - public void errorParam() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k2) tmp as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql); - Assert.assertTrue(explainString.contains("No matching function with signature: explode_split(varchar(1))")); - - sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k1) tmp as e1;"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql); - Assert.assertTrue(explainString.contains("No matching function with signature: explode_split(int)")); - } - - /* Case2 table function in where stmt - select k1 from table where explode_split(k2, ",") = "1"; - */ - @Test - public void tableFunctionInWhere() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from db1.tbl1 where explode_split(k2, \",\");"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql); - Assert.assertTrue(explainString, - explainString.contains("No matching function with signature: explode_split(varchar(1), varchar(65533)).")); - } - - // test projection - /* Case1 the source column is not be projected - select k1, e1 from table lateral view explode_split(k2, ",") t1 as e1 - project column: k1, e1 - prune column: k2 - */ - @Test - public void nonProjectSourceColumn() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k2, \",\") tmp1 as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 1")); - Assert.assertTrue(explainString.contains("output slot id: 1 2")); - } - - /* - Case2 the lateral view column is projected when it is in the agg function. 
- select k1, sum(cast(e1 as int)) from table lateral view explode_split(k2, ",") t1 as e1 group by k1; - project column: k1, e1 - prune column: k2 - */ - @Test - public void projectLateralViewColumn() throws Exception { - String sql = - "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, sum(cast(e1 as int)) from db1.tbl1 lateral view explode_split(k2, \",\") tmp1 as e1" - + " group by k1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 1")); - Assert.assertTrue(explainString.contains("output slot id: 1 2")); - } - - /* - Case3 the source column is not be projected when it is in the where clause - select k1, e1 from table lateral view explode_split(k2, ",") t1 as e1 where k2=1; - project column: k1, e1 - prune column: k2 - */ - @Test - public void nonProjectSourceColumnWhenInWhereClause() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl1 lateral view explode_split(k2, \",\") tmp1 as e1" - + " where k2=1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(`db1`.`tbl1`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 1")); - Assert.assertTrue(explainString.contains("output slot id: 1 2")); - } - - /* - Case4 the source column is projected when predicate could not be pushed down - select a.k1, t1.e1 from table a lateral view explode_split(k2, ",") t1 as e1 - right join table b on a.k1=b.k1 where k2=1; - project column: k1, k2, e1 - */ - @Test - public void projectSourceColumnWhenPredicateCannotPushedDown() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1, tmp1.e1 from db1.tbl1 a lateral view explode_split(k2, \",\") tmp1 as e1" - + " right join db1.tbl1 b on a.k1=b.k1 where a.k2=1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains("table function: explode_split(`a`.`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 1")); - Assert.assertTrue(explainString.contains("output slot id: 0 1 2")); - } - - /* - Case5 There is only one source column in select items - select a.k1 from table a lateral view explode_split(k2, ",") t1 as e1 - left join table b on a.k1=b.k1 where k2=1 - project column: k1 - prune column: k2, e1 - */ - @Test - public void nonProjectLateralColumnAndSourceColumn() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(k2, \",\") tmp1 as e1" - + " left join db1.tbl1 b on a.k1=b.k1 where a.k2=1"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains("table function: explode_split(`a`.`k2`, ',')")); - 
Assert.assertTrue(explainString.contains("lateral view tuple id: 1")); - Assert.assertTrue(explainString.contains("output slot id: 2")); - } - - // scalar function in explode_split - /* - Case1 invalid column in scalar function - select a.k1 from table a lateral view explode_split(t2.k2, ",") t1 as e1 - invalid column: t2.k2 - Case2 - select a.k1 from table a lateral view explode_split(k100, ",") t1 as e1 - invalid column: t1.k100 - Case3 - select a.k1 from db1.table a lateral view explode_split(db2.table.k2, ",") t1 as e1 - invalid column: db2.table.k2 - */ - @Test - public void invalidColumnInExplodeSplit() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(tbl2.k1, \",\") tmp1 as e1"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString, explainString.contains("The column `tbl2`.`k1` in lateral view must come from the origin table `a`")); - sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(k100, \",\") tmp1 as e1"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString, explainString.contains("Unknown column 'k100'")); - sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(db2.tbl1.k2, \",\") tmp1 as e1"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString, explainString.contains("The column `db2`.`tbl1`.`k2` in lateral view must come from the origin table")); - } - - /* - Case1 invalid agg function - select a.k1 from db1.tbl1 a lateral view explode_split(sum(a.k1), ",") tmp1 as e1 - Case2 subquery - select a.k1 from db1.tbl1 a lateral view explode_split(a in ) - */ - @Test - public void invalidFunctionInLateralView() throws Exception { - String sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(sum(k1), \",\") tmp1 as e1"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("Agg function are not allowed in lateral view.")); - sql = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(k1=(select k1 from db1.tbl1), \",\") tmp1 as e1"; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("Subquery is not allowed in lateral view")); - } - - /* - Case1 valid scalar function - select a.k1 from db1.tbl1 a lateral view explode_split(concat('a', ',', 'b'), ",") tmp1 as e1 - */ - @Test - public void scalarFunctionInLateralView() throws Exception { - String sql = - "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ a.k1 from db1.tbl1 a lateral view explode_split(concat(k2, ',' , k3), \",\") tmp1 as e1 "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue( - explainString.contains("table function: explode_split(concat(`a`.`k2`, ',', `a`.`k3`), ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 1")); - Assert.assertTrue(explainString.contains("output slot id: 3")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=0, col=k2, colUniqueId=1, type=varchar(1)")); - 
Assert.assertTrue(explainString.contains("SlotDescriptor{id=1, col=k3, colUniqueId=2, type=varchar(1)")); - } - - // lateral view of subquery - /* - Case1 reduce tuple of subquery - select e1 from (select k1 as c1 from tbl1) tmp1 lateral view explode_split(c1, ",") tmp2 as e1 - */ - @Test - public void lateralViewColumnOfReduceTuple() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ e1 from (select k2 as c1 from db1.tbl1) a lateral view explode_split(c1, \",\") tmp1 as e1 "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 1, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains("table function: explode_split(`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 2")); - Assert.assertTrue(explainString.contains("output slot id: 2")); - Assert.assertTrue(explainString.contains("tuple ids: 0 2")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=2, col=e1, colUniqueId=-1, type=varchar")); - } - - /* - Case2 agg column of inlineview - select e1 from (select k1 as c1 from tbl1 group by k1) tmp1 lateral view explode_split(c1, ",") tmp2 as e1 - */ - @Test - public void aggInlineView() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ e1 from (select k2 as c1 from db1.tbl1 group by c1) a lateral view explode_split(c1, \",\") tmp1 as e1 "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 2, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains("table function: explode_split(`k2`, ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 3")); - Assert.assertTrue(explainString.contains("output slot id: 3")); - Assert.assertTrue(explainString.contains("tuple ids: 1 3")); - Assert.assertTrue(explainString.contains("SlotDescriptor{id=3, col=e1, colUniqueId=-1, type=varchar")); - } - - /* - Case3 materialize inline view column - select c1, e1 from (select k1 as c1, min(k2) as c2 from tbl1 group by k1) tmp1 lateral view explode_split(c2, ",") tmp2 as e1 - */ - @Test - public void aggColumnInlineViewInTB() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ c1, e1 from (select k1 as c1, min(k2) as c2 from db1.tbl1 group by c1) a " - + "lateral view explode_split(c2, \",\") tmp1 as e1"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 2, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains("table function: explode_split(min(`k2`), ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 3")); - Assert.assertTrue(explainString.contains("output slot id: 2 6")); - Assert.assertTrue(explainString.contains("tuple ids: 1 3")); - String formatString = explainString.replaceAll(" ", ""); - Assert.assertTrue(formatString.contains( - "SlotDescriptor{id=0,col=k1,colUniqueId=0,type=int" - )); - Assert.assertTrue(formatString.contains( - "SlotDescriptor{id=1,col=k2,colUniqueId=1,type=varchar(1)" - )); - Assert.assertTrue(formatString.contains( - "SlotDescriptor{id=2,col=k1,colUniqueId=0,type=int" - )); - Assert.assertTrue(formatString.contains( - "SlotDescriptor{id=3,col=null,colUniqueId=null,type=varchar" - )); - 
Assert.assertTrue(formatString.contains( - "SlotDescriptor{id=6,col=e1,colUniqueId=-1,type=varchar" - )); - } - - @Test - public void testExplodeBitmap() throws Exception { - String sql = "desc select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl2 lateral view explode_bitmap(v1) tmp1 as e1 "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - System.out.println(explainString); - Assert.assertTrue(explainString.contains("table function: explode_bitmap(`db1`.`tbl2`.`v1`)")); - Assert.assertTrue(explainString.contains("output slot id: 1 2")); - } - - @Test - public void testExplodeJsonArray() throws Exception { - String sql = "desc select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl2 lateral view explode_json_array_int('[1,2,3]') tmp1 as e1 "; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - System.out.println(explainString); - Assert.assertTrue(explainString.contains("table function: explode_json_array_int('[1,2,3]')")); - Assert.assertTrue(explainString.contains("output slot id: 0 1")); - - sql = "desc select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl2 lateral view explode_json_array_string('[\"a\",\"b\",\"c\"]') tmp1 as e1 "; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - System.out.println(explainString); - Assert.assertTrue(explainString.contains("table function: explode_json_array_string('[\"a\",\"b\",\"c\"]')")); - Assert.assertTrue(explainString.contains("output slot id: 0 1")); - - sql = "desc select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl2 lateral view explode_json_array_double('[1.1, 2.2, 3.3]') tmp1 as e1 "; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - System.out.println(explainString); - Assert.assertTrue(explainString.contains("table function: explode_json_array_double('[1.1, 2.2, 3.3]')")); - Assert.assertTrue(explainString.contains("output slot id: 0 1")); - - sql = "desc select /*+ SET_VAR(enable_nereids_planner=false) */ k1, e1 from db1.tbl2 lateral view explode_json_array_json('[{\"id\":1,\"name\":\"John\"},{\"id\":2,\"name\":\"Mary\"},{\"id\":3,\"name\":\"Bob\"}]') tmp1 as e1 "; - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - System.out.println(explainString); - Assert.assertTrue(explainString.contains("table function: explode_json_array_json('[{\"id\":1,\"name\":\"John\"},{\"id\":2,\"name\":\"Mary\"},{\"id\":3,\"name\":\"Bob\"}]')")); - Assert.assertTrue(explainString.contains("output slot id: 0 1")); - } - - /* - Case4 agg and order column in the same stmt with lateral view - select min(c1) from (select k1 as c1, min(k2) as c2 from tbl1 group by k1) tmp1 - lateral view explode_split(c2, ",") tmp2 as e1 order by min(c1) - */ - @Test - public void aggColumnForbidden() throws Exception { - String sql = "desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ min(c1) from (select k1 as c1, min(k2) as c2 from db1.tbl1 group by c1) a " - + "lateral view explode_split(c2, \",\") tmp1 as e1 order by min(c1)"; - String errorMsg = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(errorMsg.toLowerCase().contains("lateral view as a inline view")); - } - - /* - Case5 agg and order column in the outer level - select min(c1) from (select c1 from (select k1 as c1, min(k2) as c2 from tbl1 group by k1) tmp1 - lateral view explode_split(c2, ",") tmp2 as e1 ) tmp3 - */ - @Test - public void aggColumnInOuterQuery() throws Exception { - String sql = 
"desc verbose select /*+ SET_VAR(enable_nereids_planner=false) */ min(c1) from (select c1 from" - + " (select k1 as c1, min(k2) as c2 from db1.tbl1 group by c1) a " - + "lateral view explode_split(c2, \",\") tmp1 as e1) tmp2"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(UtFrameUtils.checkPlanResultContainsNode(explainString, 2, "TABLE FUNCTION NODE")); - Assert.assertTrue(explainString.contains("table function: explode_split(min(`k2`), ',')")); - Assert.assertTrue(explainString.contains("lateral view tuple id: 3")); - Assert.assertTrue(explainString.contains("output slot id: 2")); - Assert.assertTrue(explainString.contains("tuple ids: 1 3")); - } - - @Test - public void testLateralViewWithView() throws Exception { - // test 1 - String createViewStr = "create view db1.v1 (k1,e1) as select /*+ SET_VAR(enable_nereids_planner=false) */ k1,e1" - + " from db1.table_for_view lateral view explode_split(k3,',') tmp as e1;"; - CreateViewStmt createViewStmt = (CreateViewStmt) UtFrameUtils.parseAndAnalyzeStmt(createViewStr, ctx); - Env.getCurrentEnv().createView(createViewStmt); - - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.v1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("output slot id: 1 2")); - // query again to see if it has error - explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("output slot id: 1 2")); - } - - @Test - public void testLateralViewWithWhere() throws Exception { - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp as e1" - + " where k1 in (select k2 from db1.table_for_view);"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("join op: LEFT SEMI JOIN(BROADCAST)")); - Assert.assertTrue(explainString.contains("`k1` = `k2`")); - Assert.assertTrue(!explainString.contains("`k2` = `k2`")); - } - - @Test - public void testLateralViewWithCTE() throws Exception { - String sql = "with tmp as (select k1,e1 from db1.table_for_view lateral view explode_split(k3,',') tmp2 as e1)" - + " select /*+ SET_VAR(enable_nereids_planner=false) */ * from tmp;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertTrue(explainString.contains("table function:" - + " explode_split(`db1`.`table_for_view`.`k3`, ',') ")); - } - - @Test - public void testLateralViewWithCTEBug() throws Exception { - String sql = "with tmp as (select * from db1.table_for_view where k2=1)" - + " select /*+ SET_VAR(enable_nereids_planner=false) */ k1,e1 from tmp lateral view explode_split(k3,',') tmp2 as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertFalse(explainString.contains("Unknown column 'e1' in 'table list'")); - } - - @Test - public void testLateralViewUnknownColumnBug() throws Exception { - // test2 - String createViewStr = "create view db1.v2 (k1,k3) as select k1,k3 from db1.table_for_view;"; - CreateViewStmt createViewStmt = (CreateViewStmt) UtFrameUtils.parseAndAnalyzeStmt(createViewStr, ctx); - Env.getCurrentEnv().createView(createViewStmt); - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ k1,e1 from db1.v2 lateral view explode_split(k3,',') tmp as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - 
Assert.assertFalse(explainString.contains("Unknown column 'e1' in 'table list'")); - } - - - // The 'k1' column in 'd' view should be materialized - // Fix #8850 - @Test - public void testLateralViewWithInlineViewBug() throws Exception { - String sql = "with d as (select k1+k1 as k1 from db1.table_for_view ) " - + "select /*+ SET_VAR(enable_nereids_planner=false) */ k1 from d lateral view explode_split(k1,',') tmp as e1;"; - String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(ctx, sql, true); - Assert.assertFalse(explainString.contains("Unexpected exception: org.apache.doris.analysis.FunctionCallExpr" - + " cannot be cast to org.apache.doris.analysis.SlotRef")); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/policy/PolicyTest.java b/fe/fe-core/src/test/java/org/apache/doris/policy/PolicyTest.java index aa04c14bbe9c6b..f803dc10563193 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/policy/PolicyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/policy/PolicyTest.java @@ -105,22 +105,6 @@ public void testNoPolicy() throws Exception { @Test public void testNormalSql() throws Exception { - // test user - createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); - String queryStr = "EXPLAIN select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`k1` = 1")); - dropPolicy("DROP ROW POLICY test_row_policy ON test.table1"); - // test role - createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO ROLE role1 USING (k1 = 2)"); - queryStr = "EXPLAIN select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table1"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`k1` = 2")); - dropPolicy("DROP ROW POLICY test_row_policy ON test.table1 for role role1"); - } - - @Test - public void testNormalSqlNereidsPlanners() throws Exception { // test user createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); String queryStr = "EXPLAIN select * from test.table1"; @@ -137,16 +121,6 @@ public void testNormalSqlNereidsPlanners() throws Exception { @Test public void testUniqueTable() throws Exception { - // test user - createPolicy("CREATE ROW POLICY test_unique_policy ON test.table3 AS PERMISSIVE TO test_policy USING (k1 = 1)"); - String queryStr = "EXPLAIN select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table3"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`k1` = 1")); - dropPolicy("DROP ROW POLICY test_unique_policy ON test.table3"); - } - - @Test - public void testUniqueTableNereidsPlanners() throws Exception { // test user createPolicy("CREATE ROW POLICY test_unique_policy ON test.table3 AS PERMISSIVE TO test_policy USING (k1 = 1)"); String queryStr = "EXPLAIN select * from test.table3"; @@ -157,20 +131,6 @@ public void testUniqueTableNereidsPlanners() throws Exception { @Test public void testAliasSql() throws Exception { - createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); - String queryStr = "EXPLAIN select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table1 a"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`a`.`k1` = 1")); - queryStr = "EXPLAIN select /*+ 
SET_VAR(enable_nereids_planner=false) */ * from test.table1 b"; - explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`b`.`k1` = 1")); - dropPolicy("DROP ROW POLICY test_row_policy ON test.table1"); - } - - @Test - public void testAliasSqlNereidsPlanner() throws Exception { - boolean beforeConfig = connectContext.getSessionVariable().isEnableNereidsPlanner(); - connectContext.getSessionVariable().setEnableNereidsPlanner(true); createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); String queryStr = "EXPLAIN select * from test.table1 a"; String explainString = getSQLPlanOrErrorMsg(queryStr); @@ -179,21 +139,10 @@ public void testAliasSqlNereidsPlanner() throws Exception { explainString = getSQLPlanOrErrorMsg(queryStr); Assertions.assertTrue(explainString.contains("k1[#0] = 1")); dropPolicy("DROP ROW POLICY test_row_policy ON test.table1"); - connectContext.getSessionVariable().setEnableNereidsPlanner(beforeConfig); } @Test public void testUnionSql() throws Exception { - createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); - String queryStr - = "EXPLAIN select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table1 union all select * from test.table1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`k1` = 1")); - dropPolicy("DROP ROW POLICY test_row_policy ON test.table1"); - } - - @Test - public void testUnionSqlNereidsPlanner() throws Exception { createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); String queryStr = "EXPLAIN select * from test.table1 union all select * from test.table1"; String explainString = getSQLPlanOrErrorMsg(queryStr); @@ -203,16 +152,6 @@ public void testUnionSqlNereidsPlanner() throws Exception { @Test public void testInsertSelectSql() throws Exception { - createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); - String queryStr - = "EXPLAIN insert into test.table1 select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("`k1` = 1")); - dropPolicy("DROP ROW POLICY test_row_policy ON test.table1"); - } - - @Test - public void testInsertSelectSqlNereidsPlanner() throws Exception { createPolicy("CREATE ROW POLICY test_row_policy ON test.table1 AS PERMISSIVE TO test_policy USING (k1 = 1)"); String queryStr = "EXPLAIN insert into test.table1 select * from test.table1"; String explainString = getSQLPlanOrErrorMsg(queryStr); @@ -264,21 +203,6 @@ public void testDropPolicy() throws Exception { @Test public void testMergeFilter() throws Exception { - createPolicy("CREATE ROW POLICY test_row_policy1 ON test.table1 AS RESTRICTIVE TO test_policy USING (k1 = 1)"); - createPolicy("CREATE ROW POLICY test_row_policy2 ON test.table1 AS RESTRICTIVE TO ROLE role1 USING (k2 = 1)"); - createPolicy("CREATE ROW POLICY test_row_policy3 ON test.table1 AS PERMISSIVE TO ROLE role1 USING (k2 = 2)"); - createPolicy("CREATE ROW POLICY test_row_policy4 ON test.table1 AS PERMISSIVE TO test_policy USING (k2 = 1)"); - String queryStr = "EXPLAIN select /*+ SET_VAR(enable_nereids_planner=false) */ * from test.table1"; - String explainString = getSQLPlanOrErrorMsg(queryStr); - Assertions.assertTrue(explainString.contains("(((`k1` = 1) AND (`k2` = 1)) AND 
((`k2` = 2) OR (`k2` = 1)))")); - dropPolicy("DROP ROW POLICY test_row_policy1 ON test.table1"); - dropPolicy("DROP ROW POLICY test_row_policy2 ON test.table1"); - dropPolicy("DROP ROW POLICY test_row_policy3 ON test.table1"); - dropPolicy("DROP ROW POLICY test_row_policy4 ON test.table1"); - } - - @Test - public void testMergeFilterNereidsPlanner() throws Exception { createPolicy("CREATE ROW POLICY test_row_policy1 ON test.table1 AS RESTRICTIVE TO test_policy USING (k1 = 1)"); createPolicy("CREATE ROW POLICY test_row_policy3 ON test.table1 AS PERMISSIVE TO ROLE role1 USING (k2 = 2)"); createPolicy("CREATE ROW POLICY test_row_policy4 ON test.table1 AS PERMISSIVE TO test_policy USING (k2 = 1)"); @@ -295,26 +219,6 @@ public void testMergeFilterNereidsPlanner() throws Exception { @Test public void testComplexSql() throws Exception { - createPolicy("CREATE ROW POLICY test_row_policy1 ON test.table1 AS RESTRICTIVE TO test_policy USING (k1 = 1)"); - createPolicy("CREATE ROW POLICY test_row_policy2 ON test.table1 AS RESTRICTIVE TO test_policy USING (k2 = 1)"); - String joinSql - = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from table1 join table2 on table1.k1=table2.k1"; - Assertions.assertTrue(getSQLPlanOrErrorMsg(joinSql).contains("(`k1` = 1) AND (`k2` = 1)")); - String unionSql - = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from table1 union select * from table2"; - Assertions.assertTrue(getSQLPlanOrErrorMsg(unionSql).contains("(`k1` = 1) AND (`k2` = 1)")); - String subQuerySql - = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from table2 where k1 in (select k1 from table1)"; - Assertions.assertTrue(getSQLPlanOrErrorMsg(subQuerySql).contains("(`k1` = 1) AND (`k2` = 1)")); - String aliasSql - = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from table1 t1 join table2 t2 on t1.k1=t2.k1"; - Assertions.assertTrue(getSQLPlanOrErrorMsg(aliasSql).contains("(`t1`.`k1` = 1) AND (`t1`.`k2` = 1)")); - dropPolicy("DROP ROW POLICY test_row_policy1 ON test.table1"); - dropPolicy("DROP ROW POLICY test_row_policy2 ON test.table1"); - } - - @Test - public void testComplexSqlNereidsPlanner() throws Exception { createPolicy("CREATE ROW POLICY test_row_policy1 ON test.table1 AS RESTRICTIVE TO test_policy USING (k1 = 1)"); createPolicy("CREATE ROW POLICY test_row_policy2 ON test.table1 AS RESTRICTIVE TO test_policy USING (k2 = 1)"); String joinSql = "select * from table1 join table2 on table1.k1=table2.k1"; diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/SessionVariablesTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/SessionVariablesTest.java index 05408d3b67405e..88fd4c93203321 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/SessionVariablesTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/SessionVariablesTest.java @@ -26,11 +26,9 @@ import org.apache.doris.common.PatternMatcher; import org.apache.doris.common.PatternMatcherWrapper; import org.apache.doris.common.VariableAnnotation; -import org.apache.doris.common.util.ProfileManager; import org.apache.doris.thrift.TQueryOptions; import org.apache.doris.utframe.TestWithFeService; -import org.junit.Assert; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -42,7 +40,6 @@ public class SessionVariablesTest extends TestWithFeService { private SessionVariable sessionVariable; private int numOfForwardVars; - private ProfileManager profileManager = ProfileManager.getInstance(); @Override protected void runBeforeAll() throws Exception { @@ -68,33 +65,33 @@ public 
void testExperimentalSessionVariables() throws Exception { connectContext.setThreadLocalInfo(); // 1. set without experimental SessionVariable sessionVar = connectContext.getSessionVariable(); - boolean enableNereids = sessionVar.isEnableNereidsPlanner(); - String sql = "set enable_nereids_planner=" + (enableNereids ? "false" : "true"); + boolean enableShareScan = sessionVar.getEnableSharedScan(); + String sql = "set enable_shared_scan=" + (enableShareScan ? "false" : "true"); SetStmt setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); SetExecutor setExecutor = new SetExecutor(connectContext, setStmt); setExecutor.execute(); - Assert.assertNotEquals(sessionVar.isEnableNereidsPlanner(), enableNereids); + Assertions.assertNotEquals(sessionVar.getEnableSharedScan(), enableShareScan); // 2. set with experimental - enableNereids = sessionVar.isEnableNereidsPlanner(); - sql = "set experimental_enable_nereids_planner=" + (enableNereids ? "false" : "true"); + enableShareScan = sessionVar.getEnableSharedScan(); + sql = "set experimental_enable_shared_scan=" + (enableShareScan ? "false" : "true"); setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); setExecutor = new SetExecutor(connectContext, setStmt); setExecutor.execute(); - Assert.assertNotEquals(sessionVar.isEnableNereidsPlanner(), enableNereids); + Assertions.assertNotEquals(sessionVar.getEnableSharedScan(), enableShareScan); // 3. set global without experimental - enableNereids = sessionVar.isEnableNereidsPlanner(); - sql = "set global enable_nereids_planner=" + (enableNereids ? "false" : "true"); + enableShareScan = sessionVar.getEnableSharedScan(); + sql = "set global enable_shared_scan=" + (enableShareScan ? "false" : "true"); setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); setExecutor = new SetExecutor(connectContext, setStmt); setExecutor.execute(); - Assert.assertNotEquals(sessionVar.isEnableNereidsPlanner(), enableNereids); + Assertions.assertNotEquals(sessionVar.getEnableSharedScan(), enableShareScan); // 4. set global with experimental - enableNereids = sessionVar.isEnableNereidsPlanner(); - sql = "set global experimental_enable_nereids_planner=" + (enableNereids ? "false" : "true"); + enableShareScan = sessionVar.getEnableSharedScan(); + sql = "set global experimental_enable_shared_scan=" + (enableShareScan ? "false" : "true"); setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); setExecutor = new SetExecutor(connectContext, setStmt); setExecutor.execute(); - Assert.assertNotEquals(sessionVar.isEnableNereidsPlanner(), enableNereids); + Assertions.assertNotEquals(sessionVar.getEnableSharedScan(), enableShareScan); // 5. set experimental for EXPERIMENTAL_ONLINE var boolean bucketShuffle = sessionVar.isEnableBucketShuffleJoin(); @@ -102,7 +99,7 @@ public void testExperimentalSessionVariables() throws Exception { setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); setExecutor = new SetExecutor(connectContext, setStmt); setExecutor.execute(); - Assert.assertNotEquals(sessionVar.isEnableBucketShuffleJoin(), bucketShuffle); + Assertions.assertNotEquals(sessionVar.isEnableBucketShuffleJoin(), bucketShuffle); // 6. 
set non experimental for EXPERIMENTAL_ONLINE var bucketShuffle = sessionVar.isEnableBucketShuffleJoin(); @@ -110,14 +107,14 @@ public void testExperimentalSessionVariables() throws Exception { setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); setExecutor = new SetExecutor(connectContext, setStmt); setExecutor.execute(); - Assert.assertNotEquals(sessionVar.isEnableBucketShuffleJoin(), bucketShuffle); + Assertions.assertNotEquals(sessionVar.isEnableBucketShuffleJoin(), bucketShuffle); // 4. set experimental for none experimental var sql = "set experimental_group_concat_max_len=5"; setStmt = (SetStmt) parseAndAnalyzeStmt(sql, connectContext); SetExecutor setExecutor2 = new SetExecutor(connectContext, setStmt); ExceptionChecker.expectThrowsWithMsg(DdlException.class, "Unknown system variable", - () -> setExecutor2.execute()); + setExecutor2::execute); // 5. show variables String showSql = "show variables like '%experimental%'"; @@ -129,7 +126,7 @@ public void testExperimentalSessionVariables() throws Exception { } int num = sessionVar.getVariableNumByVariableAnnotation(VariableAnnotation.EXPERIMENTAL); List> result = VariableMgr.dump(showStmt.getType(), sessionVar, matcher); - Assert.assertEquals(num, result.size()); + Assertions.assertEquals(num, result.size()); } @Test @@ -140,7 +137,7 @@ public void testForwardSessionVariables() { vars.put(SessionVariable.ENABLE_PROFILE, "true"); sessionVariable.setForwardedSessionVariables(vars); - Assertions.assertEquals(true, sessionVariable.enableProfile); + Assertions.assertTrue(sessionVariable.enableProfile); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java index 852e4ba2de7eda..3a692ebed26076 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/StmtExecutorTest.java @@ -543,7 +543,7 @@ public void testSet(@Mocked SetStmt setStmt, @Mocked SqlParser parser, public void testStmtWithUserInfo(@Mocked StatementBase stmt, @Mocked ConnectContext context) throws Exception { StmtExecutor stmtExecutor = new StmtExecutor(ctx, stmt); Deencapsulation.setField(stmtExecutor, "parsedStmt", null); - Deencapsulation.setField(stmtExecutor, "originStmt", new OriginStatement("show databases;", 1)); + Deencapsulation.setField(stmtExecutor, "originStmt", new OriginStatement("show databases;", 0)); stmtExecutor.execute(); StatementBase newstmt = Deencapsulation.getField(stmtExecutor, "parsedStmt"); Assert.assertNotNull(newstmt.getUserInfo()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java deleted file mode 100644 index 915dce122d8dce..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/ExtractCommonFactorsRuleFunctionTest.java +++ /dev/null @@ -1,338 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.rewrite; - -import org.apache.doris.common.FeConstants; -import org.apache.doris.utframe.DorisAssert; -import org.apache.doris.utframe.UtFrameUtils; - -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.UUID; - -public class ExtractCommonFactorsRuleFunctionTest { - private static final Logger LOG = LogManager.getLogger(ExtractCommonFactorsRuleFunctionTest.class); - private static String baseDir = "fe"; - private static String runningDir = baseDir + "/mocked/ExtractCommonFactorsRuleFunctionTest/" - + UUID.randomUUID().toString() + "/"; - private static DorisAssert dorisAssert; - private static final String DB_NAME = "db1"; - private static final String TABLE_NAME_1 = "tb1"; - private static final String TABLE_NAME_2 = "tb2"; - private static final String TABLE_NAME_3 = "tb3"; - private static final String TABLE_NAME_4 = "nation"; - - @BeforeClass - public static void beforeClass() throws Exception { - FeConstants.default_scheduler_interval_millisecond = 10; - FeConstants.runningUnitTest = true; - UtFrameUtils.createDorisCluster(runningDir); - dorisAssert = new DorisAssert(); - dorisAssert.withDatabase(DB_NAME).useDatabase(DB_NAME); - String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME_1 - + " (k1 int, k2 int) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME_2 - + " (k1 int, k2 int) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME_3 - + " (k1 tinyint, k2 smallint, k3 int, k4 bigint, k5 largeint, k6 date, k7 datetime, k8 float, k9 double) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - createTableSQL = "CREATE TABLE " + DB_NAME + "." 
+ TABLE_NAME_4 + "(\n" - + " `n_nationkey` int(11) NOT NULL,\n" - + " `n_name` varchar(25) NOT NULL,\n" - + " `n_regionkey` int(11) NOT NULL,\n" - + " `n_comment` varchar(152) NULL\n" - + ")\n" - + "DUPLICATE KEY(`n_nationkey`)\n" - + "DISTRIBUTED BY HASH(`n_nationkey`) BUCKETS 1\n" - + "PROPERTIES (\n" - + "\"replication_allocation\" = \"tag.location.default: 1\"\n" - + ");"; - dorisAssert.withTable(createTableSQL); - } - - @AfterClass - public static void afterClass() throws Exception { - UtFrameUtils.cleanDorisFeDir(baseDir); - } - - @Test - public void testWithoutRewritten() throws Exception { - String query = "select * from tb1, tb2 where (tb1.k1 =1) or (tb2.k2=1)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testCommonFactors() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1=tb2.k1 and tb1.k2 =1) or (tb1.k1=tb2.k1 and tb2.k2=1)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("HASH JOIN")); - Assert.assertEquals(1, StringUtils.countMatches(planString, "`tb1`.`k1` = `tb2`.`k1`")); - } - - @Test - public void testWideCommonFactorsWithOrPredicate() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 where tb1.k1 > 1000 or tb1.k1 < 200 or tb1.k1 = 300"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("((`tb1`.`k1` = 300) OR ((`tb1`.`k1` > 1000) OR (`tb1`.`k1` < 200)))")); - } - - @Test - public void testWideCommonFactorsWithEqualPredicate() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1=1 and tb2.k1=1) or (tb1.k1 =2 and tb2.k1=2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` IN (1, 2)")); - Assert.assertTrue(planString.contains("`tb2`.`k1` IN (1, 2)")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWithoutWideCommonFactorsWhenInfinityRangePredicate() throws Exception { - String query = "select * from tb1, tb2 where (tb1.k1>1 and tb2.k1=1) or (tb1.k1 <2 and tb2.k2=2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("(`tb1`.`k1` > 1 OR `tb1`.`k1` < 2)")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsWithMergeRangePredicate() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1 between 1 and 3 and tb2.k1=1) or (tb1.k1 <2 and tb2.k2=2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` <= 3")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsWithIntersectRangePredicate() throws Exception { - String query = "select * from tb1, tb2 where (tb1.k1 >1 and tb1.k1 <3 and tb1.k1 <5 and tb2.k1=1) " - + "or (tb1.k1 <2 and tb2.k2=2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsWithDuplicateRangePredicate() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1 >1 and tb1.k1 >1 
and tb1.k1 <5 and tb2.k1=1) " - + "or (tb1.k1 <2 and tb2.k2=2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` < 5")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsWithInPredicate() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1 in (1) and tb2.k1 in(1)) " - + "or (tb1.k1 in(2) and tb2.k1 in(2))"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` IN (1, 2)")); - Assert.assertTrue(planString.contains("`tb2`.`k1` IN (1, 2)")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsWithDuplicateInPredicate() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1 in (1,2) and tb2.k1 in(1,2)) " - + "or (tb1.k1 in(3) and tb2.k1 in(2))"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` IN (1, 2, 3)")); - Assert.assertTrue(planString.contains("`tb2`.`k1` IN (1, 2)")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsWithRangeAndIn() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1 between 1 and 3 and tb2.k1 in(1,2)) " - + "or (tb1.k1 between 2 and 4 and tb2.k1 in(3))"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` >= 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` <= 4")); - Assert.assertTrue(planString.contains("`tb2`.`k1` IN (1, 2, 3)")); - Assert.assertTrue(planString.contains("NESTED LOOP JOIN")); - } - - @Test - public void testWideCommonFactorsAndCommonFactors() throws Exception { - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where (tb1.k1 between 1 and 3 and tb1.k1=tb2.k1) " - + "or (tb1.k1=tb2.k1 and tb1.k1 between 2 and 4)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` >= 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` <= 4")); - Assert.assertTrue(planString.contains("`tb1`.`k1` = `tb2`.`k1`")); - Assert.assertTrue(planString.contains("HASH JOIN")); - } - - // TPC-H Q19 - @Test - public void testComplexQuery() throws Exception { - String createTableSQL = "CREATE TABLE `lineitem` (\n" - + " `l_orderkey` int(11) NOT NULL COMMENT \"\",\n" - + " `l_partkey` int(11) NOT NULL COMMENT \"\",\n" - + " `l_suppkey` int(11) NOT NULL COMMENT \"\",\n" - + " `l_linenumber` int(11) NOT NULL COMMENT \"\",\n" - + " `l_quantity` decimal(15, 2) NOT NULL COMMENT \"\",\n" - + " `l_extendedprice` decimal(15, 2) NOT NULL COMMENT \"\",\n" - + " `l_discount` decimal(15, 2) NOT NULL COMMENT \"\",\n" - + " `l_tax` decimal(15, 2) NOT NULL COMMENT \"\",\n" - + " `l_returnflag` char(1) NOT NULL COMMENT \"\",\n" - + " `l_linestatus` char(1) NOT NULL COMMENT \"\",\n" - + " `l_shipdate` date NOT NULL COMMENT \"\",\n" - + " `l_commitdate` date NOT NULL COMMENT \"\",\n" - + " `l_receiptdate` date NOT NULL COMMENT \"\",\n" - + " `l_shipinstruct` char(25) NOT NULL COMMENT \"\",\n" - + " `l_shipmode` char(10) NOT NULL COMMENT \"\",\n" - + " `l_comment` varchar(44) NOT NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`l_orderkey`)\n" - + 
"COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 2\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ");"; - dorisAssert.withTable(createTableSQL); - createTableSQL = "CREATE TABLE `part` (\n" - + " `p_partkey` int(11) NOT NULL COMMENT \"\",\n" - + " `p_name` varchar(55) NOT NULL COMMENT \"\",\n" - + " `p_mfgr` char(25) NOT NULL COMMENT \"\",\n" - + " `p_brand` char(10) NOT NULL COMMENT \"\",\n" - + " `p_type` varchar(25) NOT NULL COMMENT \"\",\n" - + " `p_size` int(11) NOT NULL COMMENT \"\",\n" - + " `p_container` char(10) NOT NULL COMMENT \"\",\n" - + " `p_retailprice` decimal(15, 2) NOT NULL COMMENT \"\",\n" - + " `p_comment` varchar(23) NOT NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`p_partkey`)\n" - + "COMMENT \"OLAP\"\n" - + "DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 2\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"in_memory\" = \"false\",\n" - + "\"storage_format\" = \"V2\"\n" - + ");"; - dorisAssert.withTable(createTableSQL); - String query = "select /*+ SET_VAR(enable_nereids_planner=false,enable_fold_constant_by_be=false) */ sum(l_extendedprice* (1 - l_discount)) as revenue " - + "from lineitem, part " - + "where ( p_partkey = l_partkey and p_brand = 'Brand#11' " - + "and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') " - + "and l_quantity >= 9 and l_quantity <= 9 + 10 " - + "and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') " - + "and l_shipinstruct = 'DELIVER IN PERSON' ) " - + "or ( p_partkey = l_partkey and p_brand = 'Brand#21' " - + "and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') " - + "and l_quantity >= 20 and l_quantity <= 20 + 10 " - + "and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') " - + "and l_shipinstruct = 'DELIVER IN PERSON' ) " - + "or ( p_partkey = l_partkey and p_brand = 'Brand#32' " - + "and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') " - + "and l_quantity >= 26 and l_quantity <= 26 + 10 " - + "and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') " - + "and l_shipinstruct = 'DELIVER IN PERSON' )"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("HASH JOIN")); - Assert.assertTrue(planString.contains("`l_partkey` = `p_partkey`")); - Assert.assertTrue(planString.contains("`l_shipmode` IN ('AIR', 'AIR REG')")); - Assert.assertTrue(planString.contains("`l_shipinstruct` = 'DELIVER IN PERSON'")); - Assert.assertTrue(planString.contains("(((`l_quantity` >= 9.00) AND (`l_quantity` <= 19.00)) " - + "OR ((`l_quantity` >= 20.00) AND (`l_quantity` <= 36.00)))")); - Assert.assertTrue(planString.contains("`p_size` >= 1")); - Assert.assertTrue(planString.contains("`p_brand` IN ('Brand#11', 'Brand#21', 'Brand#32')")); - Assert.assertTrue(planString.contains("`p_size` <= 15")); - Assert.assertTrue(planString.contains("`p_container` IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG', 'MED BAG', " - + "'MED BOX', 'MED PKG', 'MED PACK', 'LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')")); - } - - @Test - public void testRewriteLikePredicate() throws Exception { - // tinyint - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb3 where k1 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - dorisAssert.query(sql).explainContains("CAST(`k1` AS varchar(65533)) LIKE '%4%'"); - - // smallint - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb3 where k2 
like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - dorisAssert.query(sql).explainContains("CAST(`k2` AS varchar(65533)) LIKE '%4%'"); - - // int - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb3 where k3 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - dorisAssert.query(sql).explainContains("CAST(`k3` AS varchar(65533)) LIKE '%4%'"); - - // bigint - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb3 where k4 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - dorisAssert.query(sql).explainContains("CAST(`k4` AS varchar(65533)) LIKE '%4%'"); - - // largeint - sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb3 where k5 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - dorisAssert.query(sql).explainContains("CAST(`k5` AS varchar(65533)) LIKE '%4%'"); - } - - @Test - public void testRewriteLikePredicateDate() throws Exception { - // date - String sql = "select * from tb3 where k6 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - } - - @Test - public void testRewriteLikePredicateDateTime() throws Exception { - // datetime - String sql = "select * from tb3 where k7 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - dorisAssert.query(sql).explainQuery(); - } - - @Test - public void testRewriteLikePredicateFloat() throws Exception { - // date - String sql = "select * from tb3 where k8 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - } - - @Test - public void testRewriteLikePredicateDouble() throws Exception { - // date - String sql = "select * from tb3 where k9 like '%4%';"; - LOG.info("EXPLAIN:{}", dorisAssert.query(sql).explainQuery()); - } - - @Test - public void testExtractCommonFactorsWithOnClause() throws Exception { - String sql = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from\n" - + "db1.nation n1 join db1.nation n2\n" - + "on (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY')\n" - + "or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')"; - String explainStr = dorisAssert.query(sql).explainQuery(); - Assert.assertTrue(explainStr.contains("PREDICATES: `n1`.`n_name` IN ('FRANCE', 'GERMANY')")); - Assert.assertTrue(explainStr.contains("PREDICATES: `n2`.`n_name` IN ('FRANCE', 'GERMANY')")); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java deleted file mode 100644 index fdac5c3dcc3e6c..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/InferFiltersRuleTest.java +++ /dev/null @@ -1,404 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.rewrite; - -import org.apache.doris.common.FeConstants; -import org.apache.doris.qe.SessionVariable; -import org.apache.doris.utframe.DorisAssert; -import org.apache.doris.utframe.UtFrameUtils; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.UUID; - -public class InferFiltersRuleTest { - private static final String baseDir = "fe"; - private static final String runningDir = baseDir + "/mocked/InferFiltersRuleTest/" - + UUID.randomUUID() + "/"; - private static DorisAssert dorisAssert; - private static final String DB_NAME = "db1"; - private static final String TABLE_NAME_1 = "tb1"; - private static final String TABLE_NAME_2 = "tb2"; - private static final String TABLE_NAME_3 = "tb3"; - - @BeforeClass - public static void beforeClass() throws Exception { - FeConstants.default_scheduler_interval_millisecond = 10; - FeConstants.runningUnitTest = true; - UtFrameUtils.createDorisCluster(runningDir); - dorisAssert = new DorisAssert(); - dorisAssert.withDatabase(DB_NAME).useDatabase(DB_NAME); - String createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME_1 - + " (k1 int, k2 int) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME_2 - + " (k1 int, k2 int) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - createTableSQL = "create table " + DB_NAME + "." + TABLE_NAME_3 - + " (k1 tinyint, k2 smallint, k3 int, k4 bigint," - + " k5 largeint, k6 date, k7 datetime, k8 float, k9 double) " - + "distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; - dorisAssert.withTable(createTableSQL); - } - - @AfterClass - public static void afterClass() throws Exception { - UtFrameUtils.cleanDorisFeDir(baseDir); - } - - @Test - //set enableInferPredicate = false; - public void testWithoutRewritten() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(false); - Assert.assertFalse(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1, tb2 where tb1.k1 = 1 and tb1.k1 = tb2.k1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb2`.`k1` = 1")); - } - - @Test - public void testRewritten() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2 where tb1.k1 = 1 and tb1.k1 = tb2.k1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - } - - @Test - public void testUnequalSlotPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1, tb2 where tb1.k1 = 1 and tb1.k1 > tb2.k1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb2`.`k1` = 1")); - } - - @Test - public void 
testOn3TablesBothInnerJoinRewritten() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 inner join tb3" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb3.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testOn2TablesLeftSemiJoinEqLiteralAt2nd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 left semi join tb2 on tb1.k1 = tb2.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testOn2TablesLeftSemiJoinEqLiteralAt1st() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 left semi join tb2 on tb1.k1 = tb2.k1 and tb1.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - } - - @Test - public void testOn2TablesLeftAntiJoinEqLiteralAt2nd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 left anti join tb2 on tb1.k1 = tb2.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testOn2TablesLeftJoinNotInferable() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 left join tb2 on tb1.k1 = tb2.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb1`.`k1` = 1")); - } - - /* - the following 3 test case is valid. But we cannot tell them from other incorrect inferences. - In origin design we made a mistake: we assume inference is symmetrical. - For example, t1.x=t2.x and t1.x=1 => t2.x=1 - this is not always true. - if this is left join, t1 is left, t2.x=1 is not valid. - However, in inferFilterRule, we do not know whether t1.x is from left or right table. - And hence, we have to skip inference for outer/anti join for quick fix. 
- - @Test - public void testOn3Tables1stInner2ndRightJoinEqLiteralAt2nd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 inner join tb2 on tb1.k1 = tb2.k1" - + " right outer join tb3 on tb2.k1 = tb3.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - Assert.assertFalse(planString.contains("`tb3`.`k1` = 1")); - } - - @Test - public void testOn3Tables1stInner2ndRightJoinEqLiteralAt3rd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 inner join tb2 on tb1.k1 = tb2.k1" - + " right outer join tb3 on tb2.k1 = tb3.k1 and tb3.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - @Test - public void testOn2TablesLeftAntiJoinEqLiteralAt1st() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 left anti join tb2 on tb1.k1 = tb2.k1 and tb1.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - } - */ - @Test - public void testOnIsNotNullPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 on tb1.k1 = tb2.k1 right outer join tb3 on tb2.k1 = tb3.k1" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - Assert.assertTrue(planString, planString.contains("CAST(`tb3`.`k1` AS int)")); - } - - @Test - public void testOnBetweenPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 on tb1.k1 = tb2.k1 and tb1.k1 between 1 and 2"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` >= 1")); - Assert.assertTrue(planString.contains("`tb2`.`k1` <= 2")); - } - - @Test - public void testOnInPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 on tb1.k1 = tb2.k1 and tb1.k1 in (2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` IN (2)")); - } - - @Test - public void testWhere3TablesInnerJoinRewritten() throws Exception { - SessionVariable 
sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1, tb2, tb3 where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb3.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testWhere3TablesBothInnerJoinRewritten() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 inner join tb3" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb3.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testWhere3Tables1stInner2ndLeftJoinEqLiteralAt3rd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 left outer join tb3 on tb3.k1 = tb2.k1" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb3.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testWhere3Tables1stInner2ndLeftJoinEqLiteralAt2nd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 left outer join tb3 on tb3.k1 = tb2.k1" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - Assert.assertFalse(planString.contains("`tb3`.`k1` = 1")); - } - - @Test - public void testWhere3Tables1stInner2ndRightJoinEqLiteralAt2nd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 on tb1.k1 = tb2.k1 right outer join tb3 on tb2.k1 = tb3.k1" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString, planString.contains("`tb1`.`k1` = 1")); - Assert.assertTrue(planString, planString.contains("CAST(`tb3`.`k1` AS int) = 1")); - } - - @Test - public void testWhere3Tables1stInner2ndRightJoinEqLiteralAt3rd() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 inner join tb2 on tb1.k1 = tb2.k1 right outer join tb3 
on tb2.k1 = tb3.k1" - + " where tb1.k1 = tb2.k1 and tb2.k1 = tb3.k1 and tb3.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb2`.`k1` = 1")); - Assert.assertFalse(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testWhereIsNotNullPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - sessionVariable.setEnableRewriteElementAtToSlot(false); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false,enable_fold_constant_by_be=false) */ * from tb1 inner join tb2 inner join tb3" - + " where tb1.k1 = tb3.k1 and tb2.k1 = tb3.k1 and tb1.k1 is not null"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb3`.`k1` IS NOT NULL")); - Assert.assertTrue(planString.contains("`tb2`.`k1` IS NOT NULL")); - } - - @Test - public void testWhereInPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 where tb1.k1 = tb2.k1 and tb1.k1 in (2)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` IN (2)")); - } - - @Test - public void testWhereBetweenPredicate() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 where tb1.k1 = tb2.k1 and tb1.k1 between 1 and 2"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` >= 1")); - Assert.assertTrue(planString.contains("`tb2`.`k1` <= 2")); - } - - @Test - public void testOnAndWhere2TablesLeftJoin2ndIsLiteral() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 left join tb2 on tb1.k1 = tb2.k1 where tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testOnAndWhere2TablesInnerJoin2ndIsLiteral() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 on tb1.k1 = tb2.k1 where tb2.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb1`.`k1` = 1")); - } - - @Test - public void testOnAndWhere2TableLeftJoin1stIsLiteral() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 left join tb2 on tb1.k1 = tb2.k1 
where tb1.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - } - - @Test - public void testOnAndWhere2TablesInnerJoin1stIsLiteral() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from tb1 inner join tb2 on tb1.k1 = tb2.k1 where tb1.k1 = 1"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertTrue(planString.contains("`tb2`.`k1` = 1")); - } - - @Test - public void testSameAliasWithSlotEqualToLiteralInDifferentUnionChildren() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 inner join tb2 on tb1.k1 = tb2.k1" - + " union select * from tb1 inner join tb2 on tb1.k2 = tb2.k2 where tb1.k1 = 3"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb2`.`k1` = 3")); - } - - @Test - public void testSameAliasWithSlotInPredicateInDifferentUnionChildren() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 inner join tb2 on tb1.k1 = tb2.k1" - + " union select * from tb1 inner join tb2 on tb1.k2 = tb2.k2 where tb1.k1 in (3, 4, 5)"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb2`.`k1` IN (3, 4, 5)")); - } - - @Test - public void testSameAliasWithSlotIsNullInDifferentUnionChildren() throws Exception { - SessionVariable sessionVariable = dorisAssert.getSessionVariable(); - sessionVariable.setEnableInferPredicate(true); - Assert.assertTrue(sessionVariable.isEnableInferPredicate()); - String query = "select * from tb1 inner join tb2 on tb1.k1 = tb2.k1" - + " union select * from tb1 inner join tb2 on tb1.k2 = tb2.k2 where tb1.k1 is not null"; - String planString = dorisAssert.query(query).explainQuery(); - Assert.assertFalse(planString.contains("`tb2`.`k1` IS NOT NULL")); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRuleTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRuleTest.java deleted file mode 100644 index a169843f602ad0..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteBinaryPredicatesRuleTest.java +++ /dev/null @@ -1,129 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.rewrite; - -import org.apache.doris.analysis.BinaryPredicate; -import org.apache.doris.analysis.BinaryPredicate.Operator; -import org.apache.doris.analysis.BoolLiteral; -import org.apache.doris.analysis.Expr; -import org.apache.doris.analysis.LiteralExpr; -import org.apache.doris.analysis.SelectStmt; -import org.apache.doris.catalog.PrimitiveType; -import org.apache.doris.qe.StmtExecutor; -import org.apache.doris.utframe.TestWithFeService; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class RewriteBinaryPredicatesRuleTest extends TestWithFeService { - @Override - protected void runBeforeAll() throws Exception { - connectContext = createDefaultCtx(); - createDatabase("db"); - useDatabase("db"); - String createTable = "create table table1(id smallint, cost bigint sum) " - + "aggregate key(`id`) distributed by hash (`id`) buckets 4 " - + "properties (\"replication_num\"=\"1\");"; - createTable(createTable); - } - - @Test - public void testNormal() throws Exception { - testRewrite(Operator.EQ, "2.0", Operator.EQ, 2L, true); - testBoolean(Operator.EQ, "2.5", false); - - testBase(Operator.NE, "2.0", Operator.NE, 2L); - testBoolean(Operator.NE, "2.5", true); - - testBase(Operator.LE, "2.0", Operator.LE, 2L); - testBase(Operator.LE, "-2.5", Operator.LT, -2L); - testBase(Operator.LE, "2.5", Operator.LE, 2L); - - testBase(Operator.GE, "2.0", Operator.GE, 2L); - testBase(Operator.GE, "-2.5", Operator.GE, -2L); - testBase(Operator.GE, "2.5", Operator.GT, 2L); - - testBase(Operator.LT, "2.0", Operator.LT, 2L); - testBase(Operator.LT, "-2.5", Operator.LT, -2L); - testBase(Operator.LT, "2.5", Operator.LE, 2L); - - testBase(Operator.GT, "2.0", Operator.GT, 2L); - testBase(Operator.GT, "-2.5", Operator.GE, -2L); - testBase(Operator.GT, "2.5", Operator.GT, 2L); - } - - @Test - public void testOutOfRange() throws Exception { - // 32767 -32768 - testBoolean(Operator.EQ, "-32769.0", false); - testBase(Operator.EQ, "32767.0", Operator.EQ, 32767L); - - testBoolean(Operator.NE, "32768.0", true); - - testBoolean(Operator.LE, "32768.2", true); - testBoolean(Operator.LE, "-32769.1", false); - testBase(Operator.LE, "32767.0", Operator.LE, 32767L); - - testBoolean(Operator.GE, "32768.1", false); - testBoolean(Operator.GE, "-32769.1", true); - testBase(Operator.GE, "32767.0", Operator.GE, 32767L); - - testBoolean(Operator.LT, "32768.1", true); - testBoolean(Operator.LT, "-32769.1", false); - testBase(Operator.LT, "32767.1", Operator.LE, 32767L); - - testBoolean(Operator.GT, "32768.1", false); - testBoolean(Operator.GT, "-32769.1", true); - testBase(Operator.GT, "32767.0", Operator.GT, 32767L); - } - - private void testRewrite(Operator operator, String queryLiteral, Operator expectedOperator, long expectedChild1, - boolean expectedResultAfterRewritten) - throws Exception { - Expr expr1 = getExpr(operator, queryLiteral); - if (expr1 instanceof BoolLiteral) { - Assertions.assertEquals(((BoolLiteral) expr1).getValue(), expectedResultAfterRewritten); - } else { - testBase(operator, queryLiteral, expectedOperator, expectedChild1); - } - } - - private void testBase(Operator operator, String queryLiteral, Operator expectedOperator, long expectedChild1) - throws Exception { - Expr expr1 = getExpr(operator, queryLiteral); - Assertions.assertTrue(expr1 instanceof BinaryPredicate); - Assertions.assertEquals(expectedOperator, ((BinaryPredicate) 
expr1).getOp()); - Assertions.assertEquals(PrimitiveType.SMALLINT, expr1.getChild(0).getType().getPrimitiveType()); - Assertions.assertEquals(PrimitiveType.SMALLINT, expr1.getChild(1).getType().getPrimitiveType()); - Assertions.assertEquals(expectedChild1, ((LiteralExpr) expr1.getChild(1)).getLongValue()); - } - - private void testBoolean(Operator operator, String queryLiteral, boolean result) throws Exception { - Expr expr1 = getExpr(operator, queryLiteral); - Assertions.assertTrue(expr1 instanceof BoolLiteral); - Assertions.assertEquals(result, ((BoolLiteral) expr1).getValue()); - } - - private Expr getExpr(Operator operator, String queryLiteral) throws Exception { - String queryFormat = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from table1 where id %s %s;"; - String query = String.format(queryFormat, operator.toString(), queryLiteral); - StmtExecutor executor1 = getSqlStmtExecutor(query); - Assertions.assertNotNull(executor1); - return ((SelectStmt) executor1.getParsedStmt()).getWhereClause(); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteInPredicateRuleTest.java b/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteInPredicateRuleTest.java deleted file mode 100644 index ba13e6602a4c07..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/rewrite/RewriteInPredicateRuleTest.java +++ /dev/null @@ -1,144 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.rewrite; - -import org.apache.doris.analysis.BoolLiteral; -import org.apache.doris.analysis.Expr; -import org.apache.doris.analysis.InPredicate; -import org.apache.doris.analysis.IntLiteral; -import org.apache.doris.analysis.LargeIntLiteral; -import org.apache.doris.analysis.LiteralExpr; -import org.apache.doris.analysis.SelectStmt; -import org.apache.doris.catalog.PrimitiveType; -import org.apache.doris.qe.StmtExecutor; -import org.apache.doris.utframe.TestWithFeService; - -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.util.List; - -public class RewriteInPredicateRuleTest extends TestWithFeService { - private static final String DB_NAME = "testdb"; - private static final String TABLE_SMALL = "table_small"; - private static final String TABLE_LARGE = "table_large"; - - @Override - protected void runBeforeAll() throws Exception { - connectContext = createDefaultCtx(); - createDatabase(DB_NAME); - useDatabase(DB_NAME); - String createTableFormat = "create table %s(id %s, `date` datetime, cost bigint sum) " - + "aggregate key(`id`, `date`) distributed by hash (`id`) buckets 4 " - + "properties (\"replication_num\"=\"1\");"; - createTable(String.format(createTableFormat, TABLE_SMALL, PrimitiveType.SMALLINT)); - createTable(String.format(createTableFormat, TABLE_LARGE, PrimitiveType.LARGEINT)); - } - - @Test - public void testIntLiteralAndLargeIntLiteral() throws Exception { - // id in (TINY_INT_MIN, SMALL_INT_MIN, INT_MIN, BIG_INT_MAX, LARGE_INT_MAX) - // => id in (TINY_INT_MIN, SMALL_INT_MIN) - testBase(3, PrimitiveType.SMALLINT, IntLiteral.TINY_INT_MIN, TABLE_SMALL, - String.valueOf(IntLiteral.TINY_INT_MIN), String.valueOf(IntLiteral.SMALL_INT_MAX), - String.valueOf(IntLiteral.INT_MIN), String.valueOf(IntLiteral.BIG_INT_MAX), - LargeIntLiteral.LARGE_INT_MAX.toString()); - - // id in (TINY_INT_MIN, SMALL_INT_MIN, INT_MIN, BIG_INT_MAX, LARGE_INT_MAX) - // => id in (TINY_INT_MIN, SMALL_INT_MIN, INT_MIN, BIG_INT_MAX, LARGE_INT_MAX) - testBase(6, PrimitiveType.LARGEINT, IntLiteral.TINY_INT_MIN, TABLE_LARGE, - String.valueOf(IntLiteral.TINY_INT_MIN), String.valueOf(IntLiteral.SMALL_INT_MAX), - String.valueOf(IntLiteral.INT_MIN), String.valueOf(IntLiteral.BIG_INT_MAX), - LargeIntLiteral.LARGE_INT_MAX.toString()); - } - - @Test - public void testDecimalLiteral() throws Exception { - // type of id is smallint: id in (2.0, 3.5) => id in (2) - testBase(2, PrimitiveType.SMALLINT, 2, TABLE_SMALL, "2.0", "3.5"); - - testBase(2, PrimitiveType.SMALLINT, 3, TABLE_SMALL, "2.1", "3.0", "3.5"); - - // type of id is largeint: id in (2.0, 3.5) => id in (2) - testBase(2, PrimitiveType.LARGEINT, 2, TABLE_LARGE, "2.0", "3.5"); - } - - @Test - public void testStringLiteral() throws Exception { - // type of id is smallint: id in ("2.0", "3.5") => id in (2) - testBase(2, PrimitiveType.SMALLINT, 2, TABLE_SMALL, "\"2.0\"", "\"3.5\""); - - // type of id is largeint: id in ("2.0", "3.5") => id in (2) - testBase(2, PrimitiveType.LARGEINT, 2, TABLE_LARGE, "\"2.0\"", "\"3.5\""); - } - - @Test - public void testBooleanLiteral() throws Exception { - // type of id is smallint: id in (true, false) => id in (1, 0) - testBase(3, PrimitiveType.SMALLINT, 0, TABLE_SMALL, "false", "true"); - - // type of id is largeint: id in (true, false) => id in (1, 0) - testBase(3, PrimitiveType.LARGEINT, 1, TABLE_LARGE, "true", "false"); - } - - @Test - public void testMixedLiteralExpr() throws 
Exception { - // type of id is smallint: id in (1, 2.0, 3.3) -> id in (1, 2) - testBase(3, PrimitiveType.SMALLINT, 1, TABLE_SMALL, "1", "2.0", "3.3"); - // type of id is smallint: id in (1, 1.0, 1.1) => id in (1, 1) - testBase(3, PrimitiveType.SMALLINT, 1, TABLE_SMALL, "1", "1.0", "1.1"); - // type of id is smallint: id in ("1.0", 2.0, 3.3, "5.2") => id in (1, 2) - testBase(3, PrimitiveType.SMALLINT, 1, TABLE_SMALL, "\"1.0\"", "2.0", "3.3", "\"5.2\""); - // type of id is smallint: id in (false, 2.0, 3.3, "5.2", true) => id in (0, 2, 1) - testBase(4, PrimitiveType.SMALLINT, 0, TABLE_SMALL, "false", "2.0", "3.3", "\"5.2\"", "true"); - - // largeint - testBase(3, PrimitiveType.LARGEINT, 1, TABLE_LARGE, "1", "2.0", "3.3"); - testBase(3, PrimitiveType.LARGEINT, 1, TABLE_LARGE, "1", "1.0", "1.1"); - testBase(3, PrimitiveType.LARGEINT, 1, TABLE_LARGE, "\"1.0\"", "2.0", "3.3", "\"5.2\""); - testBase(4, PrimitiveType.LARGEINT, 0, TABLE_LARGE, "false", "2.0", "3.3", "\"5.2\"", "true"); - } - - @Test - public void testEmpty() throws Exception { - // type of id is smallint: id in (5.5, "6.2") => false - String query = "select /*+ SET_VAR(enable_nereids_planner=false) */ * from table_small where id in (5.5, \"6.2\");"; - StmtExecutor executor1 = getSqlStmtExecutor(query); - Expr expr1 = ((SelectStmt) executor1.getParsedStmt()).getWhereClause(); - Assertions.assertTrue(expr1 instanceof BoolLiteral); - Assertions.assertFalse(((BoolLiteral) expr1).getValue()); - } - - private void testBase(int childrenNum, PrimitiveType type, long expectedOfChild1, String... literals) - throws Exception { - List list = Lists.newArrayList(); - Lists.newArrayList(literals).forEach(e -> list.add("%s")); - list.remove(list.size() - 1); - String queryFormat = "select /*+ SET_VAR(enable_nereids_planner=false,enable_fold_constant_by_be=false) */ * from %s where id in (" + Joiner.on(", ").join(list) + ");"; - String query = String.format(queryFormat, literals); - StmtExecutor executor1 = getSqlStmtExecutor(query); - Expr expr1 = ((SelectStmt) executor1.getParsedStmt()).getWhereClause(); - Assertions.assertTrue(expr1 instanceof InPredicate); - Assertions.assertEquals(childrenNum, expr1.getChildren().size()); - Assertions.assertEquals(type, expr1.getChild(0).getType().getPrimitiveType()); - Assertions.assertEquals(type, expr1.getChild(1).getType().getPrimitiveType()); - Assertions.assertEquals(expectedOfChild1, ((LiteralExpr) expr1.getChild(1)).getLongValue()); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/statistics/StatsDeriveResultTest.java b/fe/fe-core/src/test/java/org/apache/doris/statistics/StatsDeriveResultTest.java deleted file mode 100644 index c3f04bccfc8b28..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/statistics/StatsDeriveResultTest.java +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.statistics; - -import org.apache.doris.common.Id; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.util.Date; - -public class StatsDeriveResultTest { - @Test - public void testUpdateRowCountByLimit() { - StatsDeriveResult stats = new StatsDeriveResult(100); - ColumnStatistic a = new ColumnStatistic(100, 10, null, 1, 5, 10, - 1, 100, null, null, false, - new Date().toString()); - Id id = new Id(1); - stats.addColumnStats(id, a); - StatsDeriveResult res = stats.updateByLimit(0); - Assertions.assertEquals(0, res.getRowCount()); - Assertions.assertEquals(1, res.getSlotIdToColumnStats().size()); - ColumnStatistic resColStats = res.getColumnStatsBySlotId(id); - Assertions.assertEquals(0, resColStats.ndv); - Assertions.assertEquals(1, resColStats.avgSizeByte); - Assertions.assertEquals(0, resColStats.numNulls); - Assertions.assertEquals(1, resColStats.dataSize); - Assertions.assertEquals(1, resColStats.minValue); - Assertions.assertEquals(100, resColStats.maxValue); - Assertions.assertEquals(false, resColStats.isUnKnown); - - res = stats.updateByLimit(1); - resColStats = res.getColumnStatsBySlotId(id); - Assertions.assertEquals(1, resColStats.ndv); - Assertions.assertEquals(1, resColStats.avgSizeByte); - Assertions.assertEquals(1, resColStats.numNulls); - Assertions.assertEquals(1, resColStats.dataSize); - Assertions.assertEquals(1, resColStats.minValue); - Assertions.assertEquals(100, resColStats.maxValue); - Assertions.assertEquals(false, resColStats.isUnKnown); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/transaction/DatabaseTransactionMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/transaction/DatabaseTransactionMgrTest.java index bb29a2a4dbdede..a189aba68116b8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/transaction/DatabaseTransactionMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/transaction/DatabaseTransactionMgrTest.java @@ -145,7 +145,7 @@ public Map addTransactionToTransactionMgr() throws UserException { Table testTable1 = masterEnv.getInternalCatalog().getDbOrMetaException(CatalogTestUtil.testDbId1) .getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId1, - transTablets); + transTablets, null); TransactionState transactionState1 = fakeEditLog.getTransaction(transactionId1); Map> keyToSuccessTablets = new HashMap<>(); DatabaseTransactionMgrTest.setSuccessTablet(keyToSuccessTablets, diff --git a/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java b/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java index 03a29fb453f3cd..420800a4bb3bd0 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/transaction/GlobalTransactionMgrTest.java @@ -178,7 +178,7 @@ public void testCommitTransaction() throws UserException { Table testTable1 = masterEnv.getInternalCatalog().getDbOrMetaException(CatalogTestUtil.testDbId1) .getTableOrMetaException(CatalogTestUtil.testTableId1); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId, - transTablets); + transTablets, null); TransactionState transactionState = fakeEditLog.getTransaction(transactionId); // check status is 
committed Assert.assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); @@ -210,7 +210,7 @@ public void testCommitTransactionWithOneFailed() throws UserException { Lists.newArrayList(CatalogTestUtil.testBackendId1, CatalogTestUtil.testBackendId2)); // commit txn masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId, - transTablets); + transTablets, null); checkVersion(testTable1, CatalogTestUtil.testPartition1, CatalogTestUtil.testIndexId1, CatalogTestUtil.testTabletId1, CatalogTestUtil.testStartVersion, CatalogTestUtil.testStartVersion + 2, @@ -241,7 +241,7 @@ public void testCommitTransactionWithOneFailed() throws UserException { Lists.newArrayList(CatalogTestUtil.testBackendId1, CatalogTestUtil.testBackendId3)); try { masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), - transactionId2, transTablets); + transactionId2, transTablets, null); Assert.fail(); } catch (TabletQuorumFailedException e) { TransactionState transactionState = masterTransMgr.getTransactionState(CatalogTestUtil.testDbId1, transactionId2); @@ -261,7 +261,7 @@ public void testCommitTransactionWithOneFailed() throws UserException { if (true) { List transTablets = generateTabletCommitInfos(CatalogTestUtil.testTabletId1, allBackends); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId2, - transTablets); + transTablets, null); TransactionState transactionState = fakeEditLog.getTransaction(transactionId2); // check status is committed Assert.assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); @@ -301,7 +301,7 @@ public void testCommitRoutineLoadTransaction(@Injectable TabletCommitInfo tablet List routineLoadTaskInfoList = Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList"); Map partitionIdToOffset = Maps.newHashMap(); partitionIdToOffset.put(1, 0L); - KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, 0, + KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, partitionIdToOffset, false); Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L); routineLoadTaskInfoList.add(routineLoadTaskInfo); @@ -367,7 +367,7 @@ public void testCommitRoutineLoadTransactionWithErrorMax(@Injectable TabletCommi List routineLoadTaskInfoList = Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList"); Map partitionIdToOffset = Maps.newHashMap(); partitionIdToOffset.put(1, 0L); - KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, 0, + KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, partitionIdToOffset, false); Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L); routineLoadTaskInfoList.add(routineLoadTaskInfo); @@ -430,7 +430,7 @@ public void testFinishTransaction() throws UserException { OlapTable testTable1 = (OlapTable) (masterEnv.getInternalCatalog() .getDbOrMetaException(CatalogTestUtil.testDbId1).getTableOrMetaException(CatalogTestUtil.testTableId1)); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId, - transTablets); + transTablets, null); TransactionState transactionState = fakeEditLog.getTransaction(transactionId); Assert.assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); checkTableVersion(testTable1, 1, 2); @@ -498,7 +498,7 @@ public void testFinishTransactionWithOneFailed() 
throws UserException { List transTablets = generateTabletCommitInfos(CatalogTestUtil.testTabletId1, Lists.newArrayList(CatalogTestUtil.testBackendId1, CatalogTestUtil.testBackendId2)); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId, - transTablets); + transTablets, null); // follower catalog replay the transaction TransactionState transactionState = fakeEditLog.getTransaction(transactionId); @@ -563,7 +563,7 @@ public void testFinishTransactionWithOneFailed() throws UserException { Lists.newArrayList(CatalogTestUtil.testBackendId1, CatalogTestUtil.testBackendId3)); try { masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), - transactionId2, transTablets); + transactionId2, transTablets, null); Assert.fail(); } catch (TabletQuorumFailedException e) { TransactionState transactionState = masterTransMgr.getTransactionState(CatalogTestUtil.testDbId1, @@ -577,7 +577,7 @@ public void testFinishTransactionWithOneFailed() throws UserException { if (true) { List transTablets = generateTabletCommitInfos(CatalogTestUtil.testTabletId1, allBackends); masterTransMgr.commitTransaction(CatalogTestUtil.testDbId1, Lists.newArrayList(testTable1), transactionId2, - transTablets); + transTablets, null); TransactionState transactionState = fakeEditLog.getTransaction(transactionId2); // check status is commit Assert.assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/AnotherDemoTest.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/AnotherDemoTest.java index 4f18a60fa817be..2da5a3cabd871e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/AnotherDemoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/AnotherDemoTest.java @@ -120,7 +120,7 @@ public void testCreateDbAndTable() throws Exception { } // 5. query // TODO: we can not process real query for now. So it has to be a explain query - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1"; + String queryStr = "explain select /*+ SET_VAR(disable_nereids_rules=PRUNE_EMPTY_PARTITION) */ * from db1.tbl1"; StmtExecutor stmtExecutor = new StmtExecutor(ctx, queryStr); stmtExecutor.execute(); Planner planner = stmtExecutor.planner(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoTest.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoTest.java index fec4081a41d288..289f6c9902d8f8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoTest.java @@ -108,7 +108,7 @@ public void testCreateDbAndTable() throws Exception { // 7. query // TODO: we can not process real query for now. 
So it has to be a explain query - String queryStr = "explain select /*+ SET_VAR(enable_nereids_planner=false) */ * from db1.tbl1"; + String queryStr = "explain select /*+ SET_VAR(disable_nereids_rules=PRUNE_EMPTY_PARTITION) */ * from db1.tbl1"; StmtExecutor stmtExecutor = new StmtExecutor(connectContext, queryStr); stmtExecutor.execute(); Planner planner = stmtExecutor.planner(); diff --git a/fe/fe-core/src/test/resources/META-INF/services/org.apache.doris.mysql.privilege.AccessControllerFactory b/fe/fe-core/src/test/resources/META-INF/services/org.apache.doris.mysql.privilege.AccessControllerFactory new file mode 100644 index 00000000000000..83924e7e0f6073 --- /dev/null +++ b/fe/fe-core/src/test/resources/META-INF/services/org.apache.doris.mysql.privilege.AccessControllerFactory @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +org.apache.doris.nereids.privileges.CustomAccessControllerFactory \ No newline at end of file diff --git a/gensrc/proto/cloud.proto b/gensrc/proto/cloud.proto index 268744a0088f61..93420bddbf6382 100644 --- a/gensrc/proto/cloud.proto +++ b/gensrc/proto/cloud.proto @@ -1384,6 +1384,7 @@ message UpdateDeleteBitmapRequest { repeated int64 versions = 9; // Serialized roaring bitmaps indexed with {rowset_id, segment_id, version} repeated bytes segment_delete_bitmaps = 10; + optional bool unlock = 11; } message UpdateDeleteBitmapResponse { @@ -1411,6 +1412,18 @@ message GetDeleteBitmapResponse { repeated bytes segment_delete_bitmaps = 5; } +message RemoveDeleteBitmapRequest { + optional string cloud_unique_id = 1; // For auth + optional int64 tablet_id = 2; + repeated string rowset_ids = 3; + repeated int64 begin_versions = 4; + repeated int64 end_versions = 5; +} + +message RemoveDeleteBitmapResponse { + optional MetaServiceResponseStatus status = 1; +} + message PendingDeleteBitmapPB { repeated bytes delete_bitmap_keys = 1; } @@ -1550,6 +1563,7 @@ service MetaService { rpc update_delete_bitmap(UpdateDeleteBitmapRequest) returns(UpdateDeleteBitmapResponse); rpc get_delete_bitmap(GetDeleteBitmapRequest) returns(GetDeleteBitmapResponse); rpc get_delete_bitmap_update_lock(GetDeleteBitmapUpdateLockRequest) returns(GetDeleteBitmapUpdateLockResponse); + rpc remove_delete_bitmap(RemoveDeleteBitmapRequest) returns(RemoveDeleteBitmapResponse); // routine load progress rpc get_rl_task_commit_attach(GetRLTaskCommitAttachRequest) returns (GetRLTaskCommitAttachResponse); diff --git a/gensrc/proto/descriptors.proto b/gensrc/proto/descriptors.proto index 21fc646c92d12d..99cd99410ed7de 100644 --- a/gensrc/proto/descriptors.proto +++ b/gensrc/proto/descriptors.proto @@ -73,5 +73,6 @@ message POlapTableSchemaParam { optional int64 timestamp_ms = 11 [default = 0]; optional string timezone = 12; optional int32 
auto_increment_column_unique_id = 13 [default = -1]; + optional int32 nano_seconds = 14 [default = 0]; }; diff --git a/gensrc/proto/internal_service.proto b/gensrc/proto/internal_service.proto index 4ac4fd24f3b7e5..9abf9d7ea65036 100644 --- a/gensrc/proto/internal_service.proto +++ b/gensrc/proto/internal_service.proto @@ -107,6 +107,7 @@ message PTabletWriterOpenRequest { optional bool write_file_cache = 17; optional string storage_vault_id = 18; optional int32 sender_id = 19; + optional int64 workload_group_id = 20; }; message PTabletWriterOpenResult { @@ -232,6 +233,7 @@ enum PFragmentRequestVersion { VERSION_1 = 1; // only one TExecPlanFragmentParams in request VERSION_2 = 2; // multi TExecPlanFragmentParams in request VERSION_3 = 3; // multi TPipelineFragmentParams in request + VERSION_4 = 4; // multi TPipelineFragmentParams with optimized common fields in request. Used by 2.1 }; message PExecPlanFragmentRequest { diff --git a/gensrc/proto/olap_file.proto b/gensrc/proto/olap_file.proto index 2e9fa94a343f35..9032b5ba4abe0f 100644 --- a/gensrc/proto/olap_file.proto +++ b/gensrc/proto/olap_file.proto @@ -624,4 +624,5 @@ message PartialUpdateInfoPB { optional bool is_schema_contains_auto_inc_column = 10 [default = false]; repeated string default_values = 11; optional int64 max_version_in_flush_phase = 12 [default = -1]; + optional int32 nano_seconds = 13 [default = 0]; } diff --git a/gensrc/script/doris_builtins_functions.py b/gensrc/script/doris_builtins_functions.py index 976990d4ef85d4..214dc287a50e0f 100644 --- a/gensrc/script/doris_builtins_functions.py +++ b/gensrc/script/doris_builtins_functions.py @@ -1826,6 +1826,8 @@ [['json_parse_notnull_error_to_value'], 'JSONB', ['VARCHAR', 'VARCHAR'], ''], [['json_parse_notnull_error_to_invalid'], 'JSONB', ['VARCHAR'], ''], + [['json_search'], 'JSONB', ['VARCHAR', 'VARCHAR', 'VARCHAR'], 'ALWAYS_NULLABLE'], + [['json_exists_path'], 'BOOLEAN', ['JSONB', 'VARCHAR'], ''], [['json_exists_path'], 'BOOLEAN', ['JSONB', 'STRING'], ''], [['json_type'], 'STRING', ['JSONB', 'VARCHAR'], 'ALWAYS_NULLABLE'], diff --git a/gensrc/script/gen_builtins_functions.py b/gensrc/script/gen_builtins_functions.py index e50e0d4ede9bfe..619a30d4e154ef 100755 --- a/gensrc/script/gen_builtins_functions.py +++ b/gensrc/script/gen_builtins_functions.py @@ -171,7 +171,7 @@ def generate_fe_registry_init(filename): for category, functions in doris_builtins_functions.visible_functions.items(): java_registry_file.write(" init{0}Builtins(functionSet);\n".format(category.capitalize())) - # add non_null_result_with_null_param_functions + # add null_result_with_one_null_param_functions java_registry_file.write(" Set funcNames = Sets.newHashSet();\n") for entry in doris_builtins_functions.null_result_with_one_null_param_functions: java_registry_file.write(" funcNames.add(\"%s\");\n" % entry) @@ -183,10 +183,11 @@ def generate_fe_registry_init(filename): java_registry_file.write(" nondeterministicFuncNames.add(\"%s\");\n" % entry) java_registry_file.write(" functionSet.buildNondeterministicFunctions(nondeterministicFuncNames);\n"); - java_registry_file.write(" funcNames = Sets.newHashSet();\n") - for entry in doris_builtins_functions.null_result_with_one_null_param_functions: - java_registry_file.write(" funcNames.add(\"%s\");\n" % entry) - java_registry_file.write(" functionSet.buildNullResultWithOneNullParamFunction(funcNames);\n"); + # add null_result_with_one_null_param_functions + # java_registry_file.write(" funcNames = Sets.newHashSet();\n") + # for entry in 
doris_builtins_functions.null_result_with_one_null_param_functions: + # java_registry_file.write(" funcNames.add(\"%s\");\n" % entry) + # java_registry_file.write(" functionSet.buildNullResultWithOneNullParamFunction(funcNames);\n"); java_registry_file.write(" }\n") java_registry_file.write("\n") diff --git a/gensrc/thrift/DataSinks.thrift b/gensrc/thrift/DataSinks.thrift index e46f7e6067cfef..ed7ccee69cd9a1 100644 --- a/gensrc/thrift/DataSinks.thrift +++ b/gensrc/thrift/DataSinks.thrift @@ -188,6 +188,7 @@ struct TDataStreamSink { 10: optional Descriptors.TOlapTableLocationParam tablet_sink_location 11: optional i64 tablet_sink_txn_id 12: optional Types.TTupleId tablet_sink_tuple_id + 13: optional list tablet_sink_exprs } struct TMultiCastDataStreamSink { diff --git a/gensrc/thrift/FrontendService.thrift b/gensrc/thrift/FrontendService.thrift index 9077dbd3cec2c8..3190d331851df5 100644 --- a/gensrc/thrift/FrontendService.thrift +++ b/gensrc/thrift/FrontendService.thrift @@ -1183,6 +1183,7 @@ enum TBinlogType { REPLACE_PARTITIONS = 12, TRUNCATE_TABLE = 13, RENAME_TABLE = 14, + RENAME_COLUMN = 15, } struct TBinlog { diff --git a/gensrc/thrift/HeartbeatService.thrift b/gensrc/thrift/HeartbeatService.thrift index 7c94bc9ce0c871..c03f04a6543f22 100644 --- a/gensrc/thrift/HeartbeatService.thrift +++ b/gensrc/thrift/HeartbeatService.thrift @@ -40,7 +40,7 @@ struct TMasterInfo { 8: optional i64 backend_id 9: optional list frontend_infos 10: optional string meta_service_endpoint; - 11: optional string cloud_instance_id; + 11: optional string cloud_unique_id; } struct TBackendInfo { diff --git a/gensrc/thrift/PaloInternalService.thrift b/gensrc/thrift/PaloInternalService.thrift index 7875aa2bec0526..48f41e8e0ab9f9 100644 --- a/gensrc/thrift/PaloInternalService.thrift +++ b/gensrc/thrift/PaloInternalService.thrift @@ -342,6 +342,9 @@ struct TQueryOptions { 130: optional bool enable_adaptive_pipeline_task_serial_read_on_limit = true; 131: optional i32 adaptive_pipeline_task_serial_read_on_limit = 10000; + 132: optional i32 parallel_prepare_threshold = 0; + 133: optional i32 partition_topn_max_partitions = 1024; + 134: optional i32 partition_topn_pre_partition_rows = 1000; // For cloud, to control if the content would be written into file cache // In write path, to control if the content would be written into file cache. // In read path, read from file cache or remote storage when execute query. 
@@ -811,11 +814,27 @@ struct TPipelineFragmentParams { 41: optional i64 wal_id 42: optional i64 content_length 43: optional Types.TNetworkAddress current_connect_fe + // Used by 2.1 + 44: optional list topn_filter_source_node_ids // For cloud 1000: optional bool is_mow_table; } struct TPipelineFragmentParamsList { - 1: optional list params_list; + 1: optional list params_list; + 2: optional Descriptors.TDescriptorTable desc_tbl; + // scan node id -> scan range params, only for external file scan + 3: optional map file_scan_params; + 4: optional Types.TNetworkAddress coord; + 5: optional TQueryGlobals query_globals; + 6: optional Types.TResourceInfo resource_info; + // The total number of fragments on same BE host + 7: optional i32 fragment_num_on_host + 8: optional TQueryOptions query_options + 9: optional bool is_nereids = true; + 10: optional list workload_groups + 11: optional Types.TUniqueId query_id + 12: optional list topn_filter_source_node_ids + 13: optional Types.TNetworkAddress runtime_filter_merge_addr } diff --git a/regression-test/conf/regression-conf.groovy b/regression-test/conf/regression-conf.groovy index d3d3ee264cfad7..676133f7be4d92 100644 --- a/regression-test/conf/regression-conf.groovy +++ b/regression-test/conf/regression-conf.groovy @@ -170,6 +170,16 @@ extHdfsPort = 4007 extHiveServerPort= 7001 extHiveHmsUser = "****" extHiveHmsPassword= "***********" +dfsNameservices="" +dfsNameservicesPort=8020 +dfsHaNamenodesHdfsCluster="" +dfsNamenodeRpcAddress1="" +dfsNamenodeRpcAddress2="" +dfsNamenodeRpcAddress3="" +hadoopSecurityAuthentication = "" +hadoopKerberosKeytabPath = "" +hadoopKerberosPrincipal = "" + //paimon catalog test config for bigdata enableExternalPaimonTest = false diff --git a/regression-test/data/backup_restore/test_backup_restore_atomic_with_view.out b/regression-test/data/backup_restore/test_backup_restore_atomic_with_view.out new file mode 100644 index 00000000000000..cad6dbe8fd8b5c --- /dev/null +++ b/regression-test/data/backup_restore/test_backup_restore_atomic_with_view.out @@ -0,0 +1,60 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +-- !sql -- +6 6 +7 7 +8 8 +9 9 +10 10 + +-- !sql -- +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 + +-- !sql -- +6 6 +7 7 +8 8 +9 9 +10 10 + +-- !sql -- +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 + +-- !sql -- +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 + diff --git a/regression-test/data/catalog_recycle_bin_p0/test_insert_overwrite_recover.out b/regression-test/data/catalog_recycle_bin_p0/test_insert_overwrite_recover.out new file mode 100644 index 00000000000000..eae52360da60a8 --- /dev/null +++ b/regression-test/data/catalog_recycle_bin_p0/test_insert_overwrite_recover.out @@ -0,0 +1,14 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !select_check_1 -- +1 a 2022-01-02 +2 a 2023-01-02 +3 a 2024-01-02 + +-- !select_check_1 -- +3 a 2024-01-02 + +-- !select_check_1 -- +1 a 2022-01-02 +2 a 2023-01-02 +3 a 2024-01-02 + diff --git a/regression-test/data/compaction/test_cu_compaction_remove_old_version_delete_bitmap.out b/regression-test/data/compaction/test_cu_compaction_remove_old_version_delete_bitmap.out new file mode 100644 index 00000000000000..1c3611fe0b7506 --- /dev/null +++ b/regression-test/data/compaction/test_cu_compaction_remove_old_version_delete_bitmap.out @@ -0,0 +1,29 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !sql -- +0 0 0 +1 8 8 + +-- !sql -- +0 0 0 +1 8 8 + +-- !sql -- +0 0 0 +1 13 13 + +-- !sql -- +0 0 0 +1 13 13 + +-- !sql -- +0 0 0 +1 23 23 + +-- !sql -- +0 0 0 +1 23 23 + +-- !sql -- +0 0 0 +1 28 28 + diff --git a/regression-test/data/correctness/test_column_nullable_cache.out b/regression-test/data/correctness/test_column_nullable_cache.out new file mode 100644 index 00000000000000..eac491df2120bb --- /dev/null +++ b/regression-test/data/correctness/test_column_nullable_cache.out @@ -0,0 +1,6 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !test1 -- + +-- !test2 -- +0 + diff --git a/regression-test/data/correctness/test_scan_keys_with_bool_type.out b/regression-test/data/correctness/test_scan_keys_with_bool_type.out new file mode 100644 index 00000000000000..d0448a74ed72fc --- /dev/null +++ b/regression-test/data/correctness/test_scan_keys_with_bool_type.out @@ -0,0 +1,43 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !select1 -- +-100 false -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 true -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N +-100 true 92 true 2024-02-16T04:37:37 2.34234230324234E7 \N + +-- !select2 -- +-100 false -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 true -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + +-- !select3 -- +-100 false -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 true -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + +-- !select3 -- +-100 0 -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 1 -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N +-100 1 92 true 2024-02-16T04:37:37 2.34234230324234E7 \N +-100 2 -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + +-- !select4 -- +-100 1 -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N +-100 2 -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + +-- !select5 -- +-100 0 -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 1 -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N +-100 2 -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + +-- !select6 -- +-100 a -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 b -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N +-100 b 92 true 2024-02-16T04:37:37 2.34234230324234E7 \N +-100 c 92 true 2024-02-16T04:37:37 2.34234230324234E7 \N + +-- !select7 -- +-100 a -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 b -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + +-- !select8 -- +-100 a -82 false 2023-11-11T10:49:43 8.40968969872149E8 \N +-100 b -82 true 2024-02-16T04:37:37 -1.299962421904282E9 \N + diff --git a/regression-test/data/correctness_p0/test_always_nullable_window_function_legacy_planner.out b/regression-test/data/correctness_p0/test_always_nullable_window_function_legacy_planner.out deleted file mode 100644 index b94f3ae38bbfda..00000000000000 --- a/regression-test/data/correctness_p0/test_always_nullable_window_function_legacy_planner.out +++ /dev/null @@ -1,97 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !select_default -- -21 04-21-11 1 1 1 2 1.0 1 1 \N \N -21 04-21-11 1 1 1 2 1.0 1 1 \N \N -22 04-22-10-21 0 0 1 1 0.5 1 0 \N \N -22 04-22-10-21 0 1 1 2 0.6666666666666666 1 0 \N \N -22 04-22-10-21 1 0 0 1 0.3333333333333333 1 0 \N \N -22 04-22-10-21 1 0 1 1 0.5 1 0 \N \N -23 04-23-10 1 1 1 2 1.0 1 1 \N \N -23 04-23-10 1 1 1 2 1.0 1 1 \N \N -24 02-24-10-21 1 1 1 2 1.0 1 1 \N \N -24 02-24-10-21 1 1 1 2 1.0 1 1 \N \N - --- !select_empty_window -- -21 04-21-11 1 \N \N \N \N \N \N \N \N -21 04-21-11 1 1 1 1 1.0 1 1 \N \N -22 04-22-10-21 0 \N \N \N \N \N \N \N 0 -22 04-22-10-21 0 1 1 1 1.0 1 1 0 \N -22 04-22-10-21 1 0 0 0 0.0 0 0 \N 1 -22 04-22-10-21 1 0 0 0 0.0 0 0 1 \N -23 04-23-10 1 \N \N \N \N \N \N \N \N -23 04-23-10 1 1 1 1 1.0 1 1 \N \N -24 02-24-10-21 1 \N \N \N \N \N \N \N \N -24 02-24-10-21 1 1 1 1 1.0 1 1 \N \N - --- !select_default_nullable -- -21 04-21-11 1 1 1 2 1.0 1 1 \N \N -21 04-21-11 1 1 1 2 1.0 1 1 \N \N -22 04-22-10-21 0 0 1 1 0.5 1 0 \N \N -22 04-22-10-21 0 1 1 2 0.6666666666666666 1 0 \N \N -22 04-22-10-21 1 0 0 1 0.3333333333333333 1 0 \N \N -22 04-22-10-21 1 0 1 1 0.5 1 0 \N \N -23 04-23-10 1 1 1 2 1.0 1 1 \N \N -23 04-23-10 1 1 1 2 1.0 1 1 \N \N -24 02-24-10-21 1 1 1 2 1.0 1 1 \N \N -24 02-24-10-21 1 1 1 2 1.0 1 1 \N \N - --- !select_empty_window_nullable -- -21 04-21-11 1 \N \N \N \N \N \N \N \N -21 04-21-11 1 1 1 1 1.0 1 1 \N \N -22 04-22-10-21 0 \N \N \N \N \N \N \N 0 -22 04-22-10-21 0 1 1 1 1.0 1 1 0 \N -22 04-22-10-21 1 0 0 0 0.0 0 0 \N 1 -22 04-22-10-21 1 0 0 0 0.0 0 0 1 \N -23 04-23-10 1 \N \N \N \N \N \N \N \N -23 04-23-10 1 1 1 1 1.0 1 1 \N \N -24 02-24-10-21 1 \N \N \N \N \N \N \N \N -24 02-24-10-21 1 1 1 1 1.0 1 1 \N \N - --- !select_default_old_planner -- -21 04-21-11 1 1 1 2 1.0 1 1 \N 1 -21 04-21-11 1 1 1 2 1.0 1 1 1 \N -22 04-22-10-21 0 0 1 1 0.5 1 0 \N 1 -22 04-22-10-21 0 1 1 2 0.6666666666666666 1 0 1 1 -22 04-22-10-21 1 0 0 1 0.3333333333333333 1 0 0 0 -22 04-22-10-21 1 0 1 1 0.5 1 0 0 \N -23 04-23-10 1 1 1 2 1.0 1 1 \N 1 -23 04-23-10 1 1 1 2 1.0 1 1 1 \N -24 02-24-10-21 1 1 1 2 1.0 1 1 \N 1 -24 02-24-10-21 1 1 1 2 1.0 1 1 1 \N - --- !select_empty_window_old_planner -- -21 04-21-11 1 \N \N \N \N \N \N \N \N -21 04-21-11 1 1 1 1 1.0 1 1 \N \N -22 04-22-10-21 0 \N \N \N \N \N \N \N 0 -22 04-22-10-21 0 1 1 1 1.0 1 1 0 \N -22 04-22-10-21 1 0 0 0 0.0 0 0 \N 1 -22 04-22-10-21 1 0 0 0 0.0 0 0 1 \N -23 04-23-10 1 \N \N \N \N \N \N \N \N -23 04-23-10 1 1 1 1 1.0 1 1 \N \N -24 02-24-10-21 1 \N \N \N \N \N \N \N \N -24 02-24-10-21 1 1 1 1 1.0 1 1 \N \N - --- !select_default_nullable_old_planner -- -21 04-21-11 1 1 1 2 1.0 1 1 \N 1 -21 04-21-11 1 1 1 2 1.0 1 1 1 \N -22 04-22-10-21 0 0 1 1 0.5 1 0 \N 1 -22 04-22-10-21 0 1 1 2 0.6666666666666666 1 0 1 1 -22 04-22-10-21 1 0 0 1 0.3333333333333333 1 0 0 0 -22 04-22-10-21 1 0 1 1 0.5 1 0 0 \N -23 04-23-10 1 1 1 2 1.0 1 1 \N 1 -23 04-23-10 1 1 1 2 1.0 1 1 1 \N -24 02-24-10-21 1 1 1 2 1.0 1 1 \N 1 -24 02-24-10-21 1 1 1 2 1.0 1 1 1 \N - --- !select_empty_window_nullable_old_planner -- -21 04-21-11 1 \N \N \N \N \N \N \N \N -21 04-21-11 1 1 1 1 1.0 1 1 \N \N -22 04-22-10-21 0 \N \N \N \N \N \N \N 0 -22 04-22-10-21 0 1 1 1 1.0 1 1 0 \N -22 04-22-10-21 1 0 0 0 0.0 0 0 \N 1 -22 04-22-10-21 1 0 0 0 0.0 0 0 1 \N -23 04-23-10 1 \N \N \N \N \N \N \N \N -23 04-23-10 1 1 1 1 1.0 1 1 \N \N -24 02-24-10-21 1 \N \N \N \N \N \N \N \N -24 02-24-10-21 1 1 1 1 1.0 1 1 \N \N - diff --git a/regression-test/data/correctness_p0/test_colocate_join.out 
b/regression-test/data/correctness_p0/test_colocate_join.out deleted file mode 100644 index aa5795a72f84d7..00000000000000 --- a/regression-test/data/correctness_p0/test_colocate_join.out +++ /dev/null @@ -1,22 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !global1 -- -1 jack 2022-01-01 1 jack 2022-01-01 10 -2 jack1 2022-01-02 2 jack1 2022-01-02 11 -3 jack2 2022-01-03 3 jack2 2022-01-03 12 -4 jack3 2022-02-01 4 jack3 2022-02-01 13 -5 jack4 2022-02-01 5 jack4 2022-02-01 14 - --- !global2 -- -1 jack 2022-01-01 1 jack 2022-01-01 10 -2 jack1 2022-01-02 2 jack1 2022-01-02 11 -3 jack2 2022-01-03 3 jack2 2022-01-03 12 -4 jack3 2022-02-01 4 jack3 2022-02-01 13 -5 jack4 2022-02-01 5 jack4 2022-02-01 14 - --- !global3 -- -1 jack 2022-01-01 1 jack 2022-01-01 10 -2 jack1 2022-01-02 2 jack1 2022-01-02 11 -3 jack2 2022-01-03 3 jack2 2022-01-03 12 -4 jack3 2022-02-01 4 jack3 2022-02-01 13 -5 jack4 2022-02-01 5 jack4 2022-02-01 14 - diff --git a/regression-test/data/correctness_p0/test_default_bitmap_empty.out b/regression-test/data/correctness_p0/test_default_bitmap_empty.out new file mode 100644 index 00000000000000..33cc825e767f55 --- /dev/null +++ b/regression-test/data/correctness_p0/test_default_bitmap_empty.out @@ -0,0 +1,43 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !insert_into1 -- +0 +0 +0 +0 + +-- !stream_load_csv1 -- +0 +0 +0 +0 +0 +0 + +-- !select_2 -- +0 +0 +0 +0 + +-- !stream_load_csv2 -- +0 +0 +0 +0 +0 +0 + +-- !insert_into3 -- +0 +0 +0 +0 + +-- !stream_load_csv3 -- +0 +0 +0 +0 +0 +0 + diff --git a/regression-test/data/correctness_p0/test_default_bitmap_empty_streamload.csv b/regression-test/data/correctness_p0/test_default_bitmap_empty_streamload.csv new file mode 100644 index 00000000000000..f4ec2d7748a0a2 --- /dev/null +++ b/regression-test/data/correctness_p0/test_default_bitmap_empty_streamload.csv @@ -0,0 +1,2 @@ +5,5 +6,6 \ No newline at end of file diff --git a/regression-test/data/correctness_p0/test_first_value_window_legacy_planner.out b/regression-test/data/correctness_p0/test_first_value_window_legacy_planner.out deleted file mode 100644 index 9951ad95c60bf4..00000000000000 --- a/regression-test/data/correctness_p0/test_first_value_window_legacy_planner.out +++ /dev/null @@ -1,43 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !select_default -- -21 04-21-11 1 1 -22 04-22-10-21 0 0 -22 04-22-10-21 1 1 -23 04-23-10 1 1 -24 02-24-10-21 1 1 - --- !select_default -- -21 04-21-11 ["amory", "clever"] ["amory", "clever"] -22 04-22-10-21 ["doris", "aws", "greate"] ["doris", "aws", "greate"] -22 04-22-10-21 ["is ", "cute", "tea"] ["is ", "cute", "tea"] -23 04-23-10 ["p7", "year4"] ["p7", "year4"] -24 02-24-10-21 [""] [""] - --- !select_always_nullable -- -21 04-21-11 ["amory", "clever"] \N \N -22 04-22-10-21 ["doris", "aws", "greate"] \N \N -22 04-22-10-21 ["is ", "cute", "tea"] 1 999 -23 04-23-10 ["p7", "year4"] \N \N -24 02-24-10-21 [""] \N \N - --- !select_default2 -- -21 04-21-11 1 1 1 1 -22 04-22-10-21 0 0 0 0 -22 04-22-10-21 1 0 0 0 -23 04-23-10 1 1 1 1 -24 02-24-10-21 1 1 1 1 - --- !select_default3 -- -1 21 04-21-11 \N \N \N 2 -2 21 04-21-12 2 \N \N 2 -3 21 04-21-13 3 2 2 2 -4 22 04-22-10-21 \N \N \N \N -5 22 04-22-10-22 \N \N \N 5 -6 22 04-22-10-23 5 \N \N 5 -7 22 04-22-10-24 \N 5 5 5 -8 22 04-22-10-25 9 \N \N 9 -9 23 04-23-11 \N \N \N 10 -10 23 04-23-12 10 \N \N 10 -11 23 04-23-13 \N 10 10 10 -12 24 02-24-10-21 \N \N \N \N - diff --git a/regression-test/data/correctness_p0/test_last_value_window_legacy_planner.out b/regression-test/data/correctness_p0/test_last_value_window_legacy_planner.out deleted file mode 100644 index 5792ccb0aa5fd3..00000000000000 --- a/regression-test/data/correctness_p0/test_last_value_window_legacy_planner.out +++ /dev/null @@ -1,30 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !select_default -- -21 04-21-11 1 1 -22 04-22-10-21 0 1 -22 04-22-10-21 1 1 -23 04-23-10 1 1 -24 02-24-10-21 1 1 - --- !select_default -- -21 04-21-11 ["amory", "clever"] ["amory", "clever"] -22 04-22-10-21 ["doris", "aws", "greate"] ["doris", "aws", "greate"] -22 04-22-10-21 ["is ", "cute", "tea"] ["doris", "aws", "greate"] -23 04-23-10 ["p7", "year4"] ["p7", "year4"] -24 02-24-10-21 [""] [""] - --- !select_null -- -1 21 04-21-11 1 \N 1 -2 21 04-21-12 \N \N 1 -3 21 04-21-13 \N \N \N -4 22 04-22-10 0 8 8 -5 22 04-22-11 8 \N 8 -6 22 04-22-12 \N \N 8 -7 23 04-23-13 \N 2 2 -8 23 04-23-14 2 \N 2 -9 23 04-23-15 \N \N 2 -10 23 04-23-16 \N \N \N -11 24 02-24-10-22 \N 9 9 -12 24 02-24-10-23 9 \N 9 -13 24 02-24-10-24 \N \N 9 - diff --git a/regression-test/data/correctness_p0/test_set_operation.out b/regression-test/data/correctness_p0/test_set_operation.out index 09fa8314065ac0..ca31c6a9f6232c 100644 --- a/regression-test/data/correctness_p0/test_set_operation.out +++ b/regression-test/data/correctness_p0/test_set_operation.out @@ -7,7 +7,4 @@ aaaa bbbb -- !select1 -- -aaaa -bbbb --- !select1 -- diff --git a/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_bitmap.out b/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_bitmap.out index 28d6859d5f1eab..af7d4058f43d3d 100644 --- a/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_bitmap.out +++ b/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_bitmap.out @@ -23,27 +23,3 @@ 3 8 0,1,2,3,5,99,876,2445 3 8 0,1,2,3,5,99,876,2445 --- !sql -- -1 1 1 -2 2 3,1000 -3 3 999,1000,888888 - --- !sql -- -1 1 1 -1 4 5,90,876,1000 -2 2 3,1000 -3 3 999,1000,888888 -3 8 0,1,2,3,5,99,876,2445 - --- !sql -- -1 1 1 -1 1 1 -1 4 5,90,876,1000 -1 4 5,90,876,1000 -2 2 3,1000 -2 2 3,1000 -3 3 999,1000,888888 -3 3 999,1000,888888 -3 8 0,1,2,3,5,99,876,2445 -3 8 0,1,2,3,5,99,876,2445 - diff --git 
a/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_hll.out b/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_hll.out index deeceba2986d59..59193a606ba3c2 100644 --- a/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_hll.out +++ b/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_hll.out @@ -55,59 +55,3 @@ -- !from_dup -- 8 --- !sql -- -1 0 -2 6 -3 2 - --- !sql -- -7 - --- !from_agg -- -1 0 -2 6 -3 2 - --- !from_agg -- -7 - --- !from_values -- -1 0 -1 1 -2 1 -2 6 -3 2 -4 1 - --- !from_values -- -1 1 -2 6 -3 2 -4 1 - --- !from_values -- -8 - --- !from_dup -- -1 0 -1 0 -1 1 -1 1 -2 1 -2 1 -2 6 -2 6 -3 2 -3 2 -4 1 -4 1 - --- !from_dup -- -1 1 -2 6 -3 2 -4 1 - --- !from_dup -- -8 - diff --git a/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_quantile_state.out b/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_quantile_state.out index d75ad2aa66c7d5..8ec7cd9de3bbda 100644 --- a/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_quantile_state.out +++ b/regression-test/data/data_model_p0/duplicate/storage/test_duplicate_quantile_state.out @@ -41,45 +41,3 @@ 2 -100.0 0.0 1.0 3 0.0 1.0 2.0 --- !sql -- -1 -1.0 -1.0 -1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_agg -- -1 -1.0 -1.0 -1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_values -- -1 -2.0 -2.0 -2.0 -1 -1.0 -1.0 -1.0 -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_values -- -1 -2.0 -1.0 0.0 -2 -100.0 0.0 1.0 -3 0.0 1.0 2.0 - --- !from_dup -- -1 -2.0 -2.0 -2.0 -1 -2.0 -2.0 -2.0 -1 -1.0 -1.0 -1.0 -1 -1.0 -1.0 -1.0 -1 0.0 0.0 0.0 -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -2 -100.0 -100.0 -100.0 -2 0.0 0.5 1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 -3 0.0 1.0 2.0 - --- !from_dup -- -1 -2.0 -1.0 0.0 -2 -100.0 0.0 1.0 -3 0.0 1.0 2.0 - diff --git a/regression-test/data/data_model_p0/unique/test_unique_bitmap.out b/regression-test/data/data_model_p0/unique/test_unique_bitmap.out index 2240e4201ad60a..85f559e2387e07 100644 --- a/regression-test/data/data_model_p0/unique/test_unique_bitmap.out +++ b/regression-test/data/data_model_p0/unique/test_unique_bitmap.out @@ -19,23 +19,3 @@ 2 2 3,1000 3 8 0,1,2,3,5,99,876,2445 --- !sql -- -1 1 1 -2 2 3,1000 -3 3 999,1000,888888 - --- !sql -- -1 4 5,90,876,1000 -2 2 3,1000 -3 8 0,1,2,3,5,99,876,2445 - --- !sql -- -1 1 1 -2 2 3,1000 -3 3 999,1000,888888 - --- !sql -- -1 4 5,90,876,1000 -2 2 3,1000 -3 8 0,1,2,3,5,99,876,2445 - diff --git a/regression-test/data/data_model_p0/unique/test_unique_hll.out b/regression-test/data/data_model_p0/unique/test_unique_hll.out index c6bc798fa3dbc7..89ce9ff475e42d 100644 --- a/regression-test/data/data_model_p0/unique/test_unique_hll.out +++ b/regression-test/data/data_model_p0/unique/test_unique_hll.out @@ -91,95 +91,3 @@ -- !from_uniq -- 4 --- !sql -- -1 0 -2 6 -3 2 - --- !sql -- -7 - --- !from_agg -- -1 0 -2 6 -3 2 - --- !from_agg -- -7 - --- !from_values -- -1 1 -2 1 -3 2 -4 1 - --- !from_values -- -1 1 -2 1 -3 2 -4 1 - --- !from_values -- -4 - --- !from_uniq -- -1 1 -2 1 -3 2 -4 1 - --- !from_uniq -- -1 1 -2 1 -3 2 -4 1 - --- !from_uniq -- -4 - --- !sql -- -1 0 -2 6 -3 2 - --- !sql -- -7 - --- !from_agg -- -1 0 -2 6 -3 2 - --- !from_agg -- -7 - --- !from_values -- -1 1 -2 1 -3 2 -4 1 - --- !from_values -- -1 1 -2 1 -3 2 -4 1 - --- !from_values -- -4 - --- !from_uniq -- -1 1 -2 1 -3 2 -4 1 - --- !from_uniq -- -1 1 -2 1 -3 2 -4 1 - --- !from_uniq -- -4 - diff --git 
a/regression-test/data/data_model_p0/unique/test_unique_quantile_state.out b/regression-test/data/data_model_p0/unique/test_unique_quantile_state.out index 280ea0a2cc186f..17d0ccd0a35392 100644 --- a/regression-test/data/data_model_p0/unique/test_unique_quantile_state.out +++ b/regression-test/data/data_model_p0/unique/test_unique_quantile_state.out @@ -59,63 +59,3 @@ 2 -100.0 -100.0 -100.0 3 0.0 1.0 2.0 --- !sql -- -1 -1.0 -1.0 -1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_agg -- -1 -1.0 -1.0 -1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_values -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !from_values -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !from_uniq -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !from_uniq -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !sql -- -1 -1.0 -1.0 -1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_agg -- -1 -1.0 -1.0 -1.0 -2 0.0 0.5 1.0 -3 0.0 1.0 2.0 - --- !from_values -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !from_values -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !from_uniq -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - --- !from_uniq -- -1 0.0 0.0 0.0 -2 -100.0 -100.0 -100.0 -3 0.0 1.0 2.0 - diff --git a/regression-test/data/data_model_p0/unique/test_unique_table_auto_inc.out b/regression-test/data/data_model_p0/unique/test_unique_table_auto_inc.out index c6910870c09d7e..743a8ae8c75cc4 100644 --- a/regression-test/data/data_model_p0/unique/test_unique_table_auto_inc.out +++ b/regression-test/data/data_model_p0/unique/test_unique_table_auto_inc.out @@ -189,13 +189,6 @@ NNereids 9998 3 EUROPE foobar 4 MIDDLE EAST foobar --- !sql -- -0 AFRICA barfoo -1 AMERICA barfoo -2 ASIA barfoo -3 EUROPE barfoo -4 MIDDLE EAST barfoo - -- !sql -- 0 AFRICA lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to 1 AMERICA hs use ironic, even requests. s @@ -210,13 +203,6 @@ NNereids 9998 3 test1 test2 4 MIDDLE EAST uickly special accounts cajole carefully blithely close requests. carefully final asymptotes haggle furiousl --- !sql -- -0 AFRICA lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to -1 AMERICA hs use ironic, even requests. s -2 ASIA ges. 
thinly even pinto beans ca -3 test1 test2 -4 test3 test4 - -- !sql -- 1 A 2 B diff --git a/regression-test/data/datatype_p0/decimalv3/test_decimalv3.out b/regression-test/data/datatype_p0/decimalv3/test_decimalv3.out index b137c2c882d72e..ca130bf00be4a8 100644 --- a/regression-test/data/datatype_p0/decimalv3/test_decimalv3.out +++ b/regression-test/data/datatype_p0/decimalv3/test_decimalv3.out @@ -59,18 +59,6 @@ -- !decimal256_const_7 -- 1.4E-80 --- !decimal256_const_8 -- -1.4E-45 - --- !decimal256_const_9 -- -1.4E-80 - --- !decimal256_const_10 -- -1.4E-45 - --- !decimal256_const_11 -- -1.4E-80 - -- !decimal256_cast_from_str_0 -- 1 9999999999999999999999999999999999999999999999999999999999999999999999.999999 9999999999999999999999999999999999999999999999999999999999999999999999.999999 2 -9999999999999999999999999999999999999999999999999999999999999999999999.999999 -9999999999999999999999999999999999999999999999999999999999999999999999.999999 diff --git a/regression-test/data/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.out b/regression-test/data/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.out index e29e7082664cb9..069148cd3f49dd 100644 --- a/regression-test/data/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.out +++ b/regression-test/data/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.out @@ -2,18 +2,6 @@ -- !sql -- array_enumerate_uniq --- !old_sql -- -[1, 1, 2] - --- !old_sql -- -[1, 1, 1] - --- !old_sql -- -[1] - --- !sql -- -array_enumerate_uniq - -- !nereid_sql -- [1, 1, 2] diff --git a/regression-test/data/datatype_p0/scalar_types/get_assignment_compatible_type.out b/regression-test/data/datatype_p0/scalar_types/get_assignment_compatible_type.out index 030a9b1286c214..09b6e11c95f98e 100644 --- a/regression-test/data/datatype_p0/scalar_types/get_assignment_compatible_type.out +++ b/regression-test/data/datatype_p0/scalar_types/get_assignment_compatible_type.out @@ -1,6 +1,6 @@ -- This file is automatically generated. You should know what you did if you want to edit this -- !test_sql -- -test_decimal_boolean_view CREATE VIEW `test_decimal_boolean_view` AS SELECT `id` AS `id`, `c1` AS `c1`, `c2` AS `c2` FROM `regression_test_datatype_p0_scalar_types`.`test_decimal_boolean` WHERE ((0.0 = CAST(`c1` AS decimalv3(2,1))) AND (CAST(`c2` AS decimalv3(6,1)) = 1.0)); utf8mb4 utf8mb4_0900_bin +test_decimal_boolean_view CREATE VIEW `test_decimal_boolean_view` AS select `internal`.`regression_test_datatype_p0_scalar_types`.`test_decimal_boolean`.`id`,`internal`.`regression_test_datatype_p0_scalar_types`.`test_decimal_boolean`.`c1`,`internal`.`regression_test_datatype_p0_scalar_types`.`test_decimal_boolean`.`c2` from `internal`.`regression_test_datatype_p0_scalar_types`.`test_decimal_boolean` where 0.0=`internal`.`regression_test_datatype_p0_scalar_types`.`test_decimal_boolean`.`c1` and `internal`.`regression_test_datatype_p0_scalar_types`.`test_decimal_boolean`.`c2` = 1.0; utf8mb4 utf8mb4_0900_bin -- !test_union -- 0.0 diff --git a/regression-test/data/ddl_p0/test_create_or_replace_view.out b/regression-test/data/ddl_p0/test_create_or_replace_view.out new file mode 100644 index 00000000000000..2448ba68e84eaf --- /dev/null +++ b/regression-test/data/ddl_p0/test_create_or_replace_view.out @@ -0,0 +1,7 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !sql_1 -- +1 1 1 + +-- !sql_2 -- +2 2 2 + diff --git a/regression-test/data/ddl_p0/test_create_table_generated_column/test_create_table_generated_column_legacy.out b/regression-test/data/ddl_p0/test_create_table_generated_column/test_create_table_generated_column_legacy.out deleted file mode 100644 index afe56855ba3e99..00000000000000 --- a/regression-test/data/ddl_p0/test_create_table_generated_column/test_create_table_generated_column_legacy.out +++ /dev/null @@ -1,93 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !common_default -- -0 - --- !common_without_generated_always -- -0 - --- !gencol_in_middle -- -0 - --- !gencol_refer_gencol -- -0 - --- !gencol_array_function_create -- -0 - --- !gencol_array_function_element_at_create -- -0 - --- !common_default_insert -- -1 - --- !common_default_insert_with_specific_column -- -1 - --- !common_default_test_insert_default -- -1 - --- !commont_default_select -- -1 2 3.0 -3 5 8.0 -6 7 13.0 - --- !common_without_generated_always_insert -- -1 - --- !common_without_generated_always_insert_with_specific_column -- -1 - --- !commont_without_generated_always_select -- -1 2 3.0 -6 7 13.0 - --- !gencol_in_middle_insert -- -1 - --- !gencol_in_middle_insert_with_specific_column -- -1 - --- !gencol_in_middle_insert_with_specific_column_2 -- -1 - --- !gencol_in_middle_select -- -1 6.0 5 -1 7.0 6 -4 9.0 5 - --- !gencol_refer_gencol_insert -- -1 - --- !gencol_refer_gencol_insert2 -- -1 - --- !gencol_refer_gencol_insert3 -- -1 - --- !gencol_refer_gencol_insert4 -- -1 - --- !gencol_refer_gencol_select -- -1 6.0 5 7 -2 11.0 9 12 -3 6.0 3 7 -5 11.0 6 12 - --- !gencol_array_function_insert -- -1 - --- !gencol_array_function_select -- -1 [1, 2] [3, 2] [3, 2, 1] - --- !gencol_array_function_element_at_insert -- -1 - --- !gencol_array_function_element_at_select -- -1 [1, 2] [3, 2] 1 - --- !gencol_refer_gencol -- -0 - --- !test_drop_column -- -3 - diff --git a/regression-test/data/ddl_p0/test_create_table_generated_column/test_delete_generated_column.out b/regression-test/data/ddl_p0/test_create_table_generated_column/test_delete_generated_column.out index 1f14f41375bed3..2441607cf64fac 100644 --- a/regression-test/data/ddl_p0/test_create_table_generated_column/test_delete_generated_column.out +++ b/regression-test/data/ddl_p0/test_create_table_generated_column/test_delete_generated_column.out @@ -1,52 +1,28 @@ -- This file is automatically generated. 
You should know what you did if you want to edit this --- !delete_where_gen_col -- -0 - -- !delete_where_gen_col_select -- 2 22 24 10 2 12 --- !delete_where_gen_col_partition_has_no_satisfied_row -- -0 - -- !delete_where_gen_col_partition_has_no_satisfied_row_select -- 2 22 24 10 2 12 --- !delete_where_gen_col_and_other_col -- -0 - -- !delete_where_gen_col_and_other_col_select -- 2 22 24 --- !delete_where_gen_col_unique -- -1 - -- !delete_where_gen_col_select_unique -- 2 22 24 10 2 12 --- !delete_where_gen_col_partition_has_no_satisfied_row_unique -- -0 - -- !delete_where_gen_col_partition_has_no_satisfied_row_select_unique -- 2 22 24 10 2 12 --- !delete_where_gen_col_and_other_col_unique -- -1 - -- !delete_where_gen_col_and_other_col_select_unique -- 2 22 24 --- !delete_query -- -1 - -- !delete_query_select -- --- !delete_query_cte -- -1 - -- !delete_query_cte_select -- 1 2 3 10 2 12 diff --git a/regression-test/data/ddl_p0/test_create_view.out b/regression-test/data/ddl_p0/test_create_view.out deleted file mode 100644 index 1edf464474fd28..00000000000000 --- a/regression-test/data/ddl_p0/test_create_view.out +++ /dev/null @@ -1,29 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !test_view_1 -- -1 [1, 2, 3] -2 [10, -2, 8] -3 [-1, 20, 0] - --- !test_view_2 -- -1 [1, 2, 3] [1, 1, 1] -2 [10, -2, 8] [1, 0, 1] -3 [-1, 20, 0] [0, 1, 0] - --- !test_view_3 -- -1 [1, 2, 3] [1, 2, 3] [1, 2, 3] -2 [10, -2, 8] [10, 8] [10, 8] -3 [-1, 20, 0] [20] [20] - --- !test_view_4 -- -1 [1, 2, 3] [1, 2, 3] [1, 2, 3] -2 [10, -2, 8] [10, 8] [10, 8] -3 [-1, 20, 0] [20] [20] - --- !test_view_5 -- -1 [1, 2, 3] [1, 1, 1] -2 [10, -2, 8] [1, 0, 1] -3 [-1, 20, 0] [0, 1, 0] - --- !test_view_6 -- -v1 CREATE VIEW `v1` AS SELECT `error_code` AS `error_code`, 1 AS `__literal_1`, 'string' AS `__literal_2`, now() AS `__now_3`, dayofyear(`op_time`) AS `__dayofyear_4`, CAST(`source` AS bigint) AS `__cast_expr_5`, min(`timestamp`) OVER (ORDER BY `op_time` DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) AS `__min_6`, (1 > 2) AS `__binary_predicate_7`, (2 + 3) AS `__arithmetic_expr_8`, 1 IN (1, 2, 3, 4) AS `__in_predicate_9`, `remark` LIKE '%like' AS `__like_predicate_10`, CASE WHEN (`remark` = 's') THEN 1 ELSE 2 END AS `__case_expr_11`, (TRUE | FALSE) AS `__arithmetic_expr_12` FROM `regression_test_ddl_p0`.`view_column_name_test`; - diff --git a/regression-test/data/delete_p0/test_delete_on_mor.out b/regression-test/data/delete_p0/test_delete_on_mor.out index 69029c568889c1..f1a57227ddabc3 100644 --- a/regression-test/data/delete_p0/test_delete_on_mor.out +++ b/regression-test/data/delete_p0/test_delete_on_mor.out @@ -24,28 +24,3 @@ 10003 u5 重庆 32 0 10004 u7 重庆 35 1 --- !sql -- -10000 u1 北京 20 1 -10001 u3 北京 30 0 -10002 u4 上海 20 1 -10003 u5 重庆 32 0 -10004 u7 重庆 35 1 - --- !sql -- -10000 u1 北京 20 1 -10001 u3 北京 30 0 -10002 u4 上海 20 1 -10003 u5 广州 32 0 -10004 u7 深圳 35 1 - --- !sql -- -10001 u3 北京 30 0 -10002 u4 上海 20 1 -10003 u5 重庆 32 0 -10004 u7 重庆 35 1 - --- !sql -- -10001 u3 北京 30 0 -10003 u5 重庆 32 0 -10004 u7 重庆 35 1 - diff --git a/regression-test/data/export_p2/test_export_max_file_size.out b/regression-test/data/export_p2/test_export_max_file_size.out index 8b11e1b7ce4abf..41b609be4cb78f 100644 --- a/regression-test/data/export_p2/test_export_max_file_size.out +++ b/regression-test/data/export_p2/test_export_max_file_size.out @@ -1,1003 +1,1006 @@ -- This file is automatically generated. 
You should know what you did if you want to edit this -- !select -- -1 2017-10-01 2017-10-01T00:00 Beijing 1 1 true 1 1 1 1.1 1.1 char1 1 -10 2017-10-01 2017-10-01T00:00 Beijing 10 10 true 10 10 10 10.1 10.1 char10 10 -100 2017-10-01 2017-10-01T00:00 Beijing 100 100 true 100 100 100 100.1 100.1 char100 100 -1000 2017-10-01 2017-10-01T00:00 Beijing 1000 104 true 1000 1000 1000 1000.1 1000.1 char1000 1000 -101 2017-10-01 2017-10-01T00:00 Beijing 101 101 true 101 101 101 101.101 101.101 char101 101 -102 2017-10-01 2017-10-01T00:00 Beijing 102 102 true 102 102 102 102.102 102.102 char102 102 -103 2017-10-01 2017-10-01T00:00 Beijing 103 103 true 103 103 103 103.103 103.103 char103 103 -104 2017-10-01 2017-10-01T00:00 Beijing 104 104 true 104 104 104 104.104 104.104 char104 104 -105 2017-10-01 2017-10-01T00:00 Beijing 105 105 true 105 105 105 105.105 105.105 char105 105 -106 2017-10-01 2017-10-01T00:00 Beijing 106 106 true 106 106 106 106.106 106.106 char106 106 -107 2017-10-01 2017-10-01T00:00 Beijing 107 107 true 107 107 107 107.107 107.107 char107 107 -108 2017-10-01 2017-10-01T00:00 Beijing 108 108 true 108 108 108 108.108 108.108 char108 108 -109 2017-10-01 2017-10-01T00:00 Beijing 109 109 true 109 109 109 109.109 109.109 char109 109 -11 2017-10-01 2017-10-01T00:00 Beijing 11 11 true 11 11 11 11.11 11.11 char11 11 -110 2017-10-01 2017-10-01T00:00 Beijing 110 110 true 110 110 110 110.11 110.11 char110 110 -111 2017-10-01 2017-10-01T00:00 Beijing 111 111 true 111 111 111 111.111 111.111 char111 111 -112 2017-10-01 2017-10-01T00:00 Beijing 112 112 true 112 112 112 112.112 112.112 char112 112 -113 2017-10-01 2017-10-01T00:00 Beijing 113 113 true 113 113 113 113.113 113.113 char113 113 -114 2017-10-01 2017-10-01T00:00 Beijing 114 114 true 114 114 114 114.114 114.114 char114 114 -115 2017-10-01 2017-10-01T00:00 Beijing 115 115 true 115 115 115 115.115 115.115 char115 115 -116 2017-10-01 2017-10-01T00:00 Beijing 116 116 true 116 116 116 116.116 116.116 char116 116 -117 2017-10-01 2017-10-01T00:00 Beijing 117 117 true 117 117 117 117.117 117.117 char117 117 -118 2017-10-01 2017-10-01T00:00 Beijing 118 118 true 118 118 118 118.118 118.118 char118 118 -119 2017-10-01 2017-10-01T00:00 Beijing 119 119 true 119 119 119 119.119 119.119 char119 119 -12 2017-10-01 2017-10-01T00:00 Beijing 12 12 true 12 12 12 12.12 12.12 char12 12 -120 2017-10-01 2017-10-01T00:00 Beijing 120 120 true 120 120 120 120.12 120.12 char120 120 -121 2017-10-01 2017-10-01T00:00 Beijing 121 121 true 121 121 121 121.121 121.121 char121 121 -122 2017-10-01 2017-10-01T00:00 Beijing 122 122 true 122 122 122 122.122 122.122 char122 122 -123 2017-10-01 2017-10-01T00:00 Beijing 123 123 true 123 123 123 123.123 123.123 char123 123 -124 2017-10-01 2017-10-01T00:00 Beijing 124 124 true 124 124 124 124.124 124.124 char124 124 -125 2017-10-01 2017-10-01T00:00 Beijing 125 125 true 125 125 125 125.125 125.125 char125 125 -126 2017-10-01 2017-10-01T00:00 Beijing 126 126 true 126 126 126 126.126 126.126 char126 126 -127 2017-10-01 2017-10-01T00:00 Beijing 127 127 true 127 127 127 127.127 127.127 char127 127 -128 2017-10-01 2017-10-01T00:00 Beijing 128 0 true 128 128 128 128.128 128.128 char128 128 -129 2017-10-01 2017-10-01T00:00 Beijing 129 1 true 129 129 129 129.129 129.129 char129 129 -13 2017-10-01 2017-10-01T00:00 Beijing 13 13 true 13 13 13 13.13 13.13 char13 13 -130 2017-10-01 2017-10-01T00:00 Beijing 130 2 true 130 130 130 130.13 130.13 char130 130 -131 2017-10-01 2017-10-01T00:00 Beijing 131 3 true 131 131 131 131.131 131.131 char131 131 
-132 2017-10-01 2017-10-01T00:00 Beijing 132 4 true 132 132 132 132.132 132.132 char132 132 -133 2017-10-01 2017-10-01T00:00 Beijing 133 5 true 133 133 133 133.133 133.133 char133 133 -134 2017-10-01 2017-10-01T00:00 Beijing 134 6 true 134 134 134 134.134 134.134 char134 134 -135 2017-10-01 2017-10-01T00:00 Beijing 135 7 true 135 135 135 135.135 135.135 char135 135 -136 2017-10-01 2017-10-01T00:00 Beijing 136 8 true 136 136 136 136.136 136.136 char136 136 -137 2017-10-01 2017-10-01T00:00 Beijing 137 9 true 137 137 137 137.137 137.137 char137 137 -138 2017-10-01 2017-10-01T00:00 Beijing 138 10 true 138 138 138 138.138 138.138 char138 138 -139 2017-10-01 2017-10-01T00:00 Beijing 139 11 true 139 139 139 139.139 139.139 char139 139 -14 2017-10-01 2017-10-01T00:00 Beijing 14 14 true 14 14 14 14.14 14.14 char14 14 -140 2017-10-01 2017-10-01T00:00 Beijing 140 12 true 140 140 140 140.14 140.14 char140 140 -141 2017-10-01 2017-10-01T00:00 Beijing 141 13 true 141 141 141 141.141 141.141 char141 141 -142 2017-10-01 2017-10-01T00:00 Beijing 142 14 true 142 142 142 142.142 142.142 char142 142 -143 2017-10-01 2017-10-01T00:00 Beijing 143 15 true 143 143 143 143.143 143.143 char143 143 -144 2017-10-01 2017-10-01T00:00 Beijing 144 16 true 144 144 144 144.144 144.144 char144 144 -145 2017-10-01 2017-10-01T00:00 Beijing 145 17 true 145 145 145 145.145 145.145 char145 145 -146 2017-10-01 2017-10-01T00:00 Beijing 146 18 true 146 146 146 146.146 146.146 char146 146 -147 2017-10-01 2017-10-01T00:00 Beijing 147 19 true 147 147 147 147.147 147.147 char147 147 -148 2017-10-01 2017-10-01T00:00 Beijing 148 20 true 148 148 148 148.148 148.148 char148 148 -149 2017-10-01 2017-10-01T00:00 Beijing 149 21 true 149 149 149 149.149 149.149 char149 149 -15 2017-10-01 2017-10-01T00:00 Beijing 15 15 true 15 15 15 15.15 15.15 char15 15 -150 2017-10-01 2017-10-01T00:00 Beijing 150 22 true 150 150 150 150.15 150.15 char150 150 -151 2017-10-01 2017-10-01T00:00 Beijing 151 23 true 151 151 151 151.151 151.151 char151 151 -152 2017-10-01 2017-10-01T00:00 Beijing 152 24 true 152 152 152 152.152 152.152 char152 152 -153 2017-10-01 2017-10-01T00:00 Beijing 153 25 true 153 153 153 153.153 153.153 char153 153 -154 2017-10-01 2017-10-01T00:00 Beijing 154 26 true 154 154 154 154.154 154.154 char154 154 -155 2017-10-01 2017-10-01T00:00 Beijing 155 27 true 155 155 155 155.155 155.155 char155 155 -156 2017-10-01 2017-10-01T00:00 Beijing 156 28 true 156 156 156 156.156 156.156 char156 156 -157 2017-10-01 2017-10-01T00:00 Beijing 157 29 true 157 157 157 157.157 157.157 char157 157 -158 2017-10-01 2017-10-01T00:00 Beijing 158 30 true 158 158 158 158.158 158.158 char158 158 -159 2017-10-01 2017-10-01T00:00 Beijing 159 31 true 159 159 159 159.159 159.159 char159 159 -16 2017-10-01 2017-10-01T00:00 Beijing 16 16 true 16 16 16 16.16 16.16 char16 16 -160 2017-10-01 2017-10-01T00:00 Beijing 160 32 true 160 160 160 160.16 160.16 char160 160 -161 2017-10-01 2017-10-01T00:00 Beijing 161 33 true 161 161 161 161.161 161.161 char161 161 -162 2017-10-01 2017-10-01T00:00 Beijing 162 34 true 162 162 162 162.162 162.162 char162 162 -163 2017-10-01 2017-10-01T00:00 Beijing 163 35 true 163 163 163 163.163 163.163 char163 163 -164 2017-10-01 2017-10-01T00:00 Beijing 164 36 true 164 164 164 164.164 164.164 char164 164 -165 2017-10-01 2017-10-01T00:00 Beijing 165 37 true 165 165 165 165.165 165.165 char165 165 -166 2017-10-01 2017-10-01T00:00 Beijing 166 38 true 166 166 166 166.166 166.166 char166 166 -167 2017-10-01 2017-10-01T00:00 Beijing 167 39 true 167 167 167 
167.167 167.167 char167 167 -168 2017-10-01 2017-10-01T00:00 Beijing 168 40 true 168 168 168 168.168 168.168 char168 168 -169 2017-10-01 2017-10-01T00:00 Beijing 169 41 true 169 169 169 169.169 169.169 char169 169 -17 2017-10-01 2017-10-01T00:00 Beijing 17 17 true 17 17 17 17.17 17.17 char17 17 -170 2017-10-01 2017-10-01T00:00 Beijing 170 42 true 170 170 170 170.17 170.17 char170 170 -171 2017-10-01 2017-10-01T00:00 Beijing 171 43 true 171 171 171 171.171 171.171 char171 171 -172 2017-10-01 2017-10-01T00:00 Beijing 172 44 true 172 172 172 172.172 172.172 char172 172 -173 2017-10-01 2017-10-01T00:00 Beijing 173 45 true 173 173 173 173.173 173.173 char173 173 -174 2017-10-01 2017-10-01T00:00 Beijing 174 46 true 174 174 174 174.174 174.174 char174 174 -175 2017-10-01 2017-10-01T00:00 Beijing 175 47 true 175 175 175 175.175 175.175 char175 175 -176 2017-10-01 2017-10-01T00:00 Beijing 176 48 true 176 176 176 176.176 176.176 char176 176 -177 2017-10-01 2017-10-01T00:00 Beijing 177 49 true 177 177 177 177.177 177.177 char177 177 -178 2017-10-01 2017-10-01T00:00 Beijing 178 50 true 178 178 178 178.178 178.178 char178 178 -179 2017-10-01 2017-10-01T00:00 Beijing 179 51 true 179 179 179 179.179 179.179 char179 179 -18 2017-10-01 2017-10-01T00:00 Beijing 18 18 true 18 18 18 18.18 18.18 char18 18 -180 2017-10-01 2017-10-01T00:00 Beijing 180 52 true 180 180 180 180.18 180.18 char180 180 -181 2017-10-01 2017-10-01T00:00 Beijing 181 53 true 181 181 181 181.181 181.181 char181 181 -182 2017-10-01 2017-10-01T00:00 Beijing 182 54 true 182 182 182 182.182 182.182 char182 182 -183 2017-10-01 2017-10-01T00:00 Beijing 183 55 true 183 183 183 183.183 183.183 char183 183 -184 2017-10-01 2017-10-01T00:00 Beijing 184 56 true 184 184 184 184.184 184.184 char184 184 -185 2017-10-01 2017-10-01T00:00 Beijing 185 57 true 185 185 185 185.185 185.185 char185 185 -186 2017-10-01 2017-10-01T00:00 Beijing 186 58 true 186 186 186 186.186 186.186 char186 186 -187 2017-10-01 2017-10-01T00:00 Beijing 187 59 true 187 187 187 187.187 187.187 char187 187 -188 2017-10-01 2017-10-01T00:00 Beijing 188 60 true 188 188 188 188.188 188.188 char188 188 -189 2017-10-01 2017-10-01T00:00 Beijing 189 61 true 189 189 189 189.189 189.189 char189 189 -19 2017-10-01 2017-10-01T00:00 Beijing 19 19 true 19 19 19 19.19 19.19 char19 19 -190 2017-10-01 2017-10-01T00:00 Beijing 190 62 true 190 190 190 190.19 190.19 char190 190 -191 2017-10-01 2017-10-01T00:00 Beijing 191 63 true 191 191 191 191.191 191.191 char191 191 -192 2017-10-01 2017-10-01T00:00 Beijing 192 64 true 192 192 192 192.192 192.192 char192 192 -193 2017-10-01 2017-10-01T00:00 Beijing 193 65 true 193 193 193 193.193 193.193 char193 193 -194 2017-10-01 2017-10-01T00:00 Beijing 194 66 true 194 194 194 194.194 194.194 char194 194 -195 2017-10-01 2017-10-01T00:00 Beijing 195 67 true 195 195 195 195.195 195.195 char195 195 -196 2017-10-01 2017-10-01T00:00 Beijing 196 68 true 196 196 196 196.196 196.196 char196 196 -197 2017-10-01 2017-10-01T00:00 Beijing 197 69 true 197 197 197 197.197 197.197 char197 197 -198 2017-10-01 2017-10-01T00:00 Beijing 198 70 true 198 198 198 198.198 198.198 char198 198 -199 2017-10-01 2017-10-01T00:00 Beijing 199 71 true 199 199 199 199.199 199.199 char199 199 -2 2017-10-01 2017-10-01T00:00 Beijing 2 2 true 2 2 2 2.2 2.2 char2 2 -20 2017-10-01 2017-10-01T00:00 Beijing 20 20 true 20 20 20 20.2 20.2 char20 20 -200 2017-10-01 2017-10-01T00:00 Beijing 200 72 true 200 200 200 200.2 200.2 char200 200 -201 2017-10-01 2017-10-01T00:00 Beijing 201 73 true 201 201 201 
201.201 201.201 char201 201 -202 2017-10-01 2017-10-01T00:00 Beijing 202 74 true 202 202 202 202.202 202.202 char202 202 -203 2017-10-01 2017-10-01T00:00 Beijing 203 75 true 203 203 203 203.203 203.203 char203 203 -204 2017-10-01 2017-10-01T00:00 Beijing 204 76 true 204 204 204 204.204 204.204 char204 204 -205 2017-10-01 2017-10-01T00:00 Beijing 205 77 true 205 205 205 205.205 205.205 char205 205 -206 2017-10-01 2017-10-01T00:00 Beijing 206 78 true 206 206 206 206.206 206.206 char206 206 -207 2017-10-01 2017-10-01T00:00 Beijing 207 79 true 207 207 207 207.207 207.207 char207 207 -208 2017-10-01 2017-10-01T00:00 Beijing 208 80 true 208 208 208 208.208 208.208 char208 208 -209 2017-10-01 2017-10-01T00:00 Beijing 209 81 true 209 209 209 209.209 209.209 char209 209 -21 2017-10-01 2017-10-01T00:00 Beijing 21 21 true 21 21 21 21.21 21.21 char21 21 -210 2017-10-01 2017-10-01T00:00 Beijing 210 82 true 210 210 210 210.21 210.21 char210 210 -211 2017-10-01 2017-10-01T00:00 Beijing 211 83 true 211 211 211 211.211 211.211 char211 211 -212 2017-10-01 2017-10-01T00:00 Beijing 212 84 true 212 212 212 212.212 212.212 char212 212 -213 2017-10-01 2017-10-01T00:00 Beijing 213 85 true 213 213 213 213.213 213.213 char213 213 -214 2017-10-01 2017-10-01T00:00 Beijing 214 86 true 214 214 214 214.214 214.214 char214 214 -215 2017-10-01 2017-10-01T00:00 Beijing 215 87 true 215 215 215 215.215 215.215 char215 215 -216 2017-10-01 2017-10-01T00:00 Beijing 216 88 true 216 216 216 216.216 216.216 char216 216 -217 2017-10-01 2017-10-01T00:00 Beijing 217 89 true 217 217 217 217.217 217.217 char217 217 -218 2017-10-01 2017-10-01T00:00 Beijing 218 90 true 218 218 218 218.218 218.218 char218 218 -219 2017-10-01 2017-10-01T00:00 Beijing 219 91 true 219 219 219 219.219 219.219 char219 219 -22 2017-10-01 2017-10-01T00:00 Beijing 22 22 true 22 22 22 22.22 22.22 char22 22 -220 2017-10-01 2017-10-01T00:00 Beijing 220 92 true 220 220 220 220.22 220.22 char220 220 -221 2017-10-01 2017-10-01T00:00 Beijing 221 93 true 221 221 221 221.221 221.221 char221 221 -222 2017-10-01 2017-10-01T00:00 Beijing 222 94 true 222 222 222 222.222 222.222 char222 222 -223 2017-10-01 2017-10-01T00:00 Beijing 223 95 true 223 223 223 223.223 223.223 char223 223 -224 2017-10-01 2017-10-01T00:00 Beijing 224 96 true 224 224 224 224.224 224.224 char224 224 -225 2017-10-01 2017-10-01T00:00 Beijing 225 97 true 225 225 225 225.225 225.225 char225 225 -226 2017-10-01 2017-10-01T00:00 Beijing 226 98 true 226 226 226 226.226 226.226 char226 226 -227 2017-10-01 2017-10-01T00:00 Beijing 227 99 true 227 227 227 227.227 227.227 char227 227 -228 2017-10-01 2017-10-01T00:00 Beijing 228 100 true 228 228 228 228.228 228.228 char228 228 -229 2017-10-01 2017-10-01T00:00 Beijing 229 101 true 229 229 229 229.229 229.229 char229 229 -23 2017-10-01 2017-10-01T00:00 Beijing 23 23 true 23 23 23 23.23 23.23 char23 23 -230 2017-10-01 2017-10-01T00:00 Beijing 230 102 true 230 230 230 230.23 230.23 char230 230 -231 2017-10-01 2017-10-01T00:00 Beijing 231 103 true 231 231 231 231.231 231.231 char231 231 -232 2017-10-01 2017-10-01T00:00 Beijing 232 104 true 232 232 232 232.232 232.232 char232 232 -233 2017-10-01 2017-10-01T00:00 Beijing 233 105 true 233 233 233 233.233 233.233 char233 233 -234 2017-10-01 2017-10-01T00:00 Beijing 234 106 true 234 234 234 234.234 234.234 char234 234 -235 2017-10-01 2017-10-01T00:00 Beijing 235 107 true 235 235 235 235.235 235.235 char235 235 -236 2017-10-01 2017-10-01T00:00 Beijing 236 108 true 236 236 236 236.236 236.236 char236 236 -237 2017-10-01 
2017-10-01T00:00 Beijing 237 109 true 237 237 237 237.237 237.237 char237 237 -238 2017-10-01 2017-10-01T00:00 Beijing 238 110 true 238 238 238 238.238 238.238 char238 238 -239 2017-10-01 2017-10-01T00:00 Beijing 239 111 true 239 239 239 239.239 239.239 char239 239 -24 2017-10-01 2017-10-01T00:00 Beijing 24 24 true 24 24 24 24.24 24.24 char24 24 -240 2017-10-01 2017-10-01T00:00 Beijing 240 112 true 240 240 240 240.24 240.24 char240 240 -241 2017-10-01 2017-10-01T00:00 Beijing 241 113 true 241 241 241 241.241 241.241 char241 241 -242 2017-10-01 2017-10-01T00:00 Beijing 242 114 true 242 242 242 242.242 242.242 char242 242 -243 2017-10-01 2017-10-01T00:00 Beijing 243 115 true 243 243 243 243.243 243.243 char243 243 -244 2017-10-01 2017-10-01T00:00 Beijing 244 116 true 244 244 244 244.244 244.244 char244 244 -245 2017-10-01 2017-10-01T00:00 Beijing 245 117 true 245 245 245 245.245 245.245 char245 245 -246 2017-10-01 2017-10-01T00:00 Beijing 246 118 true 246 246 246 246.246 246.246 char246 246 -247 2017-10-01 2017-10-01T00:00 Beijing 247 119 true 247 247 247 247.247 247.247 char247 247 -248 2017-10-01 2017-10-01T00:00 Beijing 248 120 true 248 248 248 248.248 248.248 char248 248 -249 2017-10-01 2017-10-01T00:00 Beijing 249 121 true 249 249 249 249.249 249.249 char249 249 -25 2017-10-01 2017-10-01T00:00 Beijing 25 25 true 25 25 25 25.25 25.25 char25 25 -250 2017-10-01 2017-10-01T00:00 Beijing 250 122 true 250 250 250 250.25 250.25 char250 250 -251 2017-10-01 2017-10-01T00:00 Beijing 251 123 true 251 251 251 251.251 251.251 char251 251 -252 2017-10-01 2017-10-01T00:00 Beijing 252 124 true 252 252 252 252.252 252.252 char252 252 -253 2017-10-01 2017-10-01T00:00 Beijing 253 125 true 253 253 253 253.253 253.253 char253 253 -254 2017-10-01 2017-10-01T00:00 Beijing 254 126 true 254 254 254 254.254 254.254 char254 254 -255 2017-10-01 2017-10-01T00:00 Beijing 255 127 true 255 255 255 255.255 255.255 char255 255 -256 2017-10-01 2017-10-01T00:00 Beijing 256 0 true 256 256 256 256.256 256.256 char256 256 -257 2017-10-01 2017-10-01T00:00 Beijing 257 1 true 257 257 257 257.257 257.257 char257 257 -258 2017-10-01 2017-10-01T00:00 Beijing 258 2 true 258 258 258 258.258 258.258 char258 258 -259 2017-10-01 2017-10-01T00:00 Beijing 259 3 true 259 259 259 259.259 259.259 char259 259 -26 2017-10-01 2017-10-01T00:00 Beijing 26 26 true 26 26 26 26.26 26.26 char26 26 -260 2017-10-01 2017-10-01T00:00 Beijing 260 4 true 260 260 260 260.26 260.26 char260 260 -261 2017-10-01 2017-10-01T00:00 Beijing 261 5 true 261 261 261 261.261 261.261 char261 261 -262 2017-10-01 2017-10-01T00:00 Beijing 262 6 true 262 262 262 262.262 262.262 char262 262 -263 2017-10-01 2017-10-01T00:00 Beijing 263 7 true 263 263 263 263.263 263.263 char263 263 -264 2017-10-01 2017-10-01T00:00 Beijing 264 8 true 264 264 264 264.264 264.264 char264 264 -265 2017-10-01 2017-10-01T00:00 Beijing 265 9 true 265 265 265 265.265 265.265 char265 265 -266 2017-10-01 2017-10-01T00:00 Beijing 266 10 true 266 266 266 266.266 266.266 char266 266 -267 2017-10-01 2017-10-01T00:00 Beijing 267 11 true 267 267 267 267.267 267.267 char267 267 -268 2017-10-01 2017-10-01T00:00 Beijing 268 12 true 268 268 268 268.268 268.268 char268 268 -269 2017-10-01 2017-10-01T00:00 Beijing 269 13 true 269 269 269 269.269 269.269 char269 269 -27 2017-10-01 2017-10-01T00:00 Beijing 27 27 true 27 27 27 27.27 27.27 char27 27 -270 2017-10-01 2017-10-01T00:00 Beijing 270 14 true 270 270 270 270.27 270.27 char270 270 -271 2017-10-01 2017-10-01T00:00 Beijing 271 15 true 271 271 271 271.271 
271.271 char271 271 -272 2017-10-01 2017-10-01T00:00 Beijing 272 16 true 272 272 272 272.272 272.272 char272 272 -273 2017-10-01 2017-10-01T00:00 Beijing 273 17 true 273 273 273 273.273 273.273 char273 273 -274 2017-10-01 2017-10-01T00:00 Beijing 274 18 true 274 274 274 274.274 274.274 char274 274 -275 2017-10-01 2017-10-01T00:00 Beijing 275 19 true 275 275 275 275.275 275.275 char275 275 -276 2017-10-01 2017-10-01T00:00 Beijing 276 20 true 276 276 276 276.276 276.276 char276 276 -277 2017-10-01 2017-10-01T00:00 Beijing 277 21 true 277 277 277 277.277 277.277 char277 277 -278 2017-10-01 2017-10-01T00:00 Beijing 278 22 true 278 278 278 278.278 278.278 char278 278 -279 2017-10-01 2017-10-01T00:00 Beijing 279 23 true 279 279 279 279.279 279.279 char279 279 -28 2017-10-01 2017-10-01T00:00 Beijing 28 28 true 28 28 28 28.28 28.28 char28 28 -280 2017-10-01 2017-10-01T00:00 Beijing 280 24 true 280 280 280 280.28 280.28 char280 280 -281 2017-10-01 2017-10-01T00:00 Beijing 281 25 true 281 281 281 281.281 281.281 char281 281 -282 2017-10-01 2017-10-01T00:00 Beijing 282 26 true 282 282 282 282.282 282.282 char282 282 -283 2017-10-01 2017-10-01T00:00 Beijing 283 27 true 283 283 283 283.283 283.283 char283 283 -284 2017-10-01 2017-10-01T00:00 Beijing 284 28 true 284 284 284 284.284 284.284 char284 284 -285 2017-10-01 2017-10-01T00:00 Beijing 285 29 true 285 285 285 285.285 285.285 char285 285 -286 2017-10-01 2017-10-01T00:00 Beijing 286 30 true 286 286 286 286.286 286.286 char286 286 -287 2017-10-01 2017-10-01T00:00 Beijing 287 31 true 287 287 287 287.287 287.287 char287 287 -288 2017-10-01 2017-10-01T00:00 Beijing 288 32 true 288 288 288 288.288 288.288 char288 288 -289 2017-10-01 2017-10-01T00:00 Beijing 289 33 true 289 289 289 289.289 289.289 char289 289 -29 2017-10-01 2017-10-01T00:00 Beijing 29 29 true 29 29 29 29.29 29.29 char29 29 -290 2017-10-01 2017-10-01T00:00 Beijing 290 34 true 290 290 290 290.29 290.29 char290 290 -291 2017-10-01 2017-10-01T00:00 Beijing 291 35 true 291 291 291 291.291 291.291 char291 291 -292 2017-10-01 2017-10-01T00:00 Beijing 292 36 true 292 292 292 292.292 292.292 char292 292 -293 2017-10-01 2017-10-01T00:00 Beijing 293 37 true 293 293 293 293.293 293.293 char293 293 -294 2017-10-01 2017-10-01T00:00 Beijing 294 38 true 294 294 294 294.294 294.294 char294 294 -295 2017-10-01 2017-10-01T00:00 Beijing 295 39 true 295 295 295 295.295 295.295 char295 295 -296 2017-10-01 2017-10-01T00:00 Beijing 296 40 true 296 296 296 296.296 296.296 char296 296 -297 2017-10-01 2017-10-01T00:00 Beijing 297 41 true 297 297 297 297.297 297.297 char297 297 -298 2017-10-01 2017-10-01T00:00 Beijing 298 42 true 298 298 298 298.298 298.298 char298 298 -299 2017-10-01 2017-10-01T00:00 Beijing 299 43 true 299 299 299 299.299 299.299 char299 299 -3 2017-10-01 2017-10-01T00:00 Beijing 3 3 true 3 3 3 3.3 3.3 char3 3 -30 2017-10-01 2017-10-01T00:00 Beijing 30 30 true 30 30 30 30.3 30.3 char30 30 -300 2017-10-01 2017-10-01T00:00 Beijing 300 44 true 300 300 300 300.3 300.3 char300 300 -301 2017-10-01 2017-10-01T00:00 Beijing 301 45 true 301 301 301 301.301 301.301 char301 301 -302 2017-10-01 2017-10-01T00:00 Beijing 302 46 true 302 302 302 302.302 302.302 char302 302 -303 2017-10-01 2017-10-01T00:00 Beijing 303 47 true 303 303 303 303.303 303.303 char303 303 -304 2017-10-01 2017-10-01T00:00 Beijing 304 48 true 304 304 304 304.304 304.304 char304 304 -305 2017-10-01 2017-10-01T00:00 Beijing 305 49 true 305 305 305 305.305 305.305 char305 305 -306 2017-10-01 2017-10-01T00:00 Beijing 306 50 true 306 306 306 
306.306 306.306 char306 306 -307 2017-10-01 2017-10-01T00:00 Beijing 307 51 true 307 307 307 307.307 307.307 char307 307 -308 2017-10-01 2017-10-01T00:00 Beijing 308 52 true 308 308 308 308.308 308.308 char308 308 -309 2017-10-01 2017-10-01T00:00 Beijing 309 53 true 309 309 309 309.309 309.309 char309 309 -31 2017-10-01 2017-10-01T00:00 Beijing 31 31 true 31 31 31 31.31 31.31 char31 31 -310 2017-10-01 2017-10-01T00:00 Beijing 310 54 true 310 310 310 310.31 310.31 char310 310 -311 2017-10-01 2017-10-01T00:00 Beijing 311 55 true 311 311 311 311.311 311.311 char311 311 -312 2017-10-01 2017-10-01T00:00 Beijing 312 56 true 312 312 312 312.312 312.312 char312 312 -313 2017-10-01 2017-10-01T00:00 Beijing 313 57 true 313 313 313 313.313 313.313 char313 313 -314 2017-10-01 2017-10-01T00:00 Beijing 314 58 true 314 314 314 314.314 314.314 char314 314 -315 2017-10-01 2017-10-01T00:00 Beijing 315 59 true 315 315 315 315.315 315.315 char315 315 -316 2017-10-01 2017-10-01T00:00 Beijing 316 60 true 316 316 316 316.316 316.316 char316 316 -317 2017-10-01 2017-10-01T00:00 Beijing 317 61 true 317 317 317 317.317 317.317 char317 317 -318 2017-10-01 2017-10-01T00:00 Beijing 318 62 true 318 318 318 318.318 318.318 char318 318 -319 2017-10-01 2017-10-01T00:00 Beijing 319 63 true 319 319 319 319.319 319.319 char319 319 -32 2017-10-01 2017-10-01T00:00 Beijing 32 32 true 32 32 32 32.32 32.32 char32 32 -320 2017-10-01 2017-10-01T00:00 Beijing 320 64 true 320 320 320 320.32 320.32 char320 320 -321 2017-10-01 2017-10-01T00:00 Beijing 321 65 true 321 321 321 321.321 321.321 char321 321 -322 2017-10-01 2017-10-01T00:00 Beijing 322 66 true 322 322 322 322.322 322.322 char322 322 -323 2017-10-01 2017-10-01T00:00 Beijing 323 67 true 323 323 323 323.323 323.323 char323 323 -324 2017-10-01 2017-10-01T00:00 Beijing 324 68 true 324 324 324 324.324 324.324 char324 324 -325 2017-10-01 2017-10-01T00:00 Beijing 325 69 true 325 325 325 325.325 325.325 char325 325 -326 2017-10-01 2017-10-01T00:00 Beijing 326 70 true 326 326 326 326.326 326.326 char326 326 -327 2017-10-01 2017-10-01T00:00 Beijing 327 71 true 327 327 327 327.327 327.327 char327 327 -328 2017-10-01 2017-10-01T00:00 Beijing 328 72 true 328 328 328 328.328 328.328 char328 328 -329 2017-10-01 2017-10-01T00:00 Beijing 329 73 true 329 329 329 329.329 329.329 char329 329 -33 2017-10-01 2017-10-01T00:00 Beijing 33 33 true 33 33 33 33.33 33.33 char33 33 -330 2017-10-01 2017-10-01T00:00 Beijing 330 74 true 330 330 330 330.33 330.33 char330 330 -331 2017-10-01 2017-10-01T00:00 Beijing 331 75 true 331 331 331 331.331 331.331 char331 331 -332 2017-10-01 2017-10-01T00:00 Beijing 332 76 true 332 332 332 332.332 332.332 char332 332 -333 2017-10-01 2017-10-01T00:00 Beijing 333 77 true 333 333 333 333.333 333.333 char333 333 -334 2017-10-01 2017-10-01T00:00 Beijing 334 78 true 334 334 334 334.334 334.334 char334 334 -335 2017-10-01 2017-10-01T00:00 Beijing 335 79 true 335 335 335 335.335 335.335 char335 335 -336 2017-10-01 2017-10-01T00:00 Beijing 336 80 true 336 336 336 336.336 336.336 char336 336 -337 2017-10-01 2017-10-01T00:00 Beijing 337 81 true 337 337 337 337.337 337.337 char337 337 -338 2017-10-01 2017-10-01T00:00 Beijing 338 82 true 338 338 338 338.338 338.338 char338 338 -339 2017-10-01 2017-10-01T00:00 Beijing 339 83 true 339 339 339 339.339 339.339 char339 339 -34 2017-10-01 2017-10-01T00:00 Beijing 34 34 true 34 34 34 34.34 34.34 char34 34 -340 2017-10-01 2017-10-01T00:00 Beijing 340 84 true 340 340 340 340.34 340.34 char340 340 -341 2017-10-01 2017-10-01T00:00 Beijing 341 
85 true 341 341 341 341.341 341.341 char341 341 -342 2017-10-01 2017-10-01T00:00 Beijing 342 86 true 342 342 342 342.342 342.342 char342 342 -343 2017-10-01 2017-10-01T00:00 Beijing 343 87 true 343 343 343 343.343 343.343 char343 343 -344 2017-10-01 2017-10-01T00:00 Beijing 344 88 true 344 344 344 344.344 344.344 char344 344 -345 2017-10-01 2017-10-01T00:00 Beijing 345 89 true 345 345 345 345.345 345.345 char345 345 -346 2017-10-01 2017-10-01T00:00 Beijing 346 90 true 346 346 346 346.346 346.346 char346 346 -347 2017-10-01 2017-10-01T00:00 Beijing 347 91 true 347 347 347 347.347 347.347 char347 347 -348 2017-10-01 2017-10-01T00:00 Beijing 348 92 true 348 348 348 348.348 348.348 char348 348 -349 2017-10-01 2017-10-01T00:00 Beijing 349 93 true 349 349 349 349.349 349.349 char349 349 -35 2017-10-01 2017-10-01T00:00 Beijing 35 35 true 35 35 35 35.35 35.35 char35 35 -350 2017-10-01 2017-10-01T00:00 Beijing 350 94 true 350 350 350 350.35 350.35 char350 350 -351 2017-10-01 2017-10-01T00:00 Beijing 351 95 true 351 351 351 351.351 351.351 char351 351 -352 2017-10-01 2017-10-01T00:00 Beijing 352 96 true 352 352 352 352.352 352.352 char352 352 -353 2017-10-01 2017-10-01T00:00 Beijing 353 97 true 353 353 353 353.353 353.353 char353 353 -354 2017-10-01 2017-10-01T00:00 Beijing 354 98 true 354 354 354 354.354 354.354 char354 354 -355 2017-10-01 2017-10-01T00:00 Beijing 355 99 true 355 355 355 355.355 355.355 char355 355 -356 2017-10-01 2017-10-01T00:00 Beijing 356 100 true 356 356 356 356.356 356.356 char356 356 -357 2017-10-01 2017-10-01T00:00 Beijing 357 101 true 357 357 357 357.357 357.357 char357 357 -358 2017-10-01 2017-10-01T00:00 Beijing 358 102 true 358 358 358 358.358 358.358 char358 358 -359 2017-10-01 2017-10-01T00:00 Beijing 359 103 true 359 359 359 359.359 359.359 char359 359 -36 2017-10-01 2017-10-01T00:00 Beijing 36 36 true 36 36 36 36.36 36.36 char36 36 -360 2017-10-01 2017-10-01T00:00 Beijing 360 104 true 360 360 360 360.36 360.36 char360 360 -361 2017-10-01 2017-10-01T00:00 Beijing 361 105 true 361 361 361 361.361 361.361 char361 361 -362 2017-10-01 2017-10-01T00:00 Beijing 362 106 true 362 362 362 362.362 362.362 char362 362 -363 2017-10-01 2017-10-01T00:00 Beijing 363 107 true 363 363 363 363.363 363.363 char363 363 -364 2017-10-01 2017-10-01T00:00 Beijing 364 108 true 364 364 364 364.364 364.364 char364 364 -365 2017-10-01 2017-10-01T00:00 Beijing 365 109 true 365 365 365 365.365 365.365 char365 365 -366 2017-10-01 2017-10-01T00:00 Beijing 366 110 true 366 366 366 366.366 366.366 char366 366 -367 2017-10-01 2017-10-01T00:00 Beijing 367 111 true 367 367 367 367.367 367.367 char367 367 -368 2017-10-01 2017-10-01T00:00 Beijing 368 112 true 368 368 368 368.368 368.368 char368 368 -369 2017-10-01 2017-10-01T00:00 Beijing 369 113 true 369 369 369 369.369 369.369 char369 369 -37 2017-10-01 2017-10-01T00:00 Beijing 37 37 true 37 37 37 37.37 37.37 char37 37 -370 2017-10-01 2017-10-01T00:00 Beijing 370 114 true 370 370 370 370.37 370.37 char370 370 -371 2017-10-01 2017-10-01T00:00 Beijing 371 115 true 371 371 371 371.371 371.371 char371 371 -372 2017-10-01 2017-10-01T00:00 Beijing 372 116 true 372 372 372 372.372 372.372 char372 372 -373 2017-10-01 2017-10-01T00:00 Beijing 373 117 true 373 373 373 373.373 373.373 char373 373 -374 2017-10-01 2017-10-01T00:00 Beijing 374 118 true 374 374 374 374.374 374.374 char374 374 -375 2017-10-01 2017-10-01T00:00 Beijing 375 119 true 375 375 375 375.375 375.375 char375 375 -376 2017-10-01 2017-10-01T00:00 Beijing 376 120 true 376 376 376 376.376 376.376 
char376 376 -377 2017-10-01 2017-10-01T00:00 Beijing 377 121 true 377 377 377 377.377 377.377 char377 377 -378 2017-10-01 2017-10-01T00:00 Beijing 378 122 true 378 378 378 378.378 378.378 char378 378 -379 2017-10-01 2017-10-01T00:00 Beijing 379 123 true 379 379 379 379.379 379.379 char379 379 -38 2017-10-01 2017-10-01T00:00 Beijing 38 38 true 38 38 38 38.38 38.38 char38 38 -380 2017-10-01 2017-10-01T00:00 Beijing 380 124 true 380 380 380 380.38 380.38 char380 380 -381 2017-10-01 2017-10-01T00:00 Beijing 381 125 true 381 381 381 381.381 381.381 char381 381 -382 2017-10-01 2017-10-01T00:00 Beijing 382 126 true 382 382 382 382.382 382.382 char382 382 -383 2017-10-01 2017-10-01T00:00 Beijing 383 127 true 383 383 383 383.383 383.383 char383 383 -384 2017-10-01 2017-10-01T00:00 Beijing 384 0 true 384 384 384 384.384 384.384 char384 384 -385 2017-10-01 2017-10-01T00:00 Beijing 385 1 true 385 385 385 385.385 385.385 char385 385 -386 2017-10-01 2017-10-01T00:00 Beijing 386 2 true 386 386 386 386.386 386.386 char386 386 -387 2017-10-01 2017-10-01T00:00 Beijing 387 3 true 387 387 387 387.387 387.387 char387 387 -388 2017-10-01 2017-10-01T00:00 Beijing 388 4 true 388 388 388 388.388 388.388 char388 388 -389 2017-10-01 2017-10-01T00:00 Beijing 389 5 true 389 389 389 389.389 389.389 char389 389 -39 2017-10-01 2017-10-01T00:00 Beijing 39 39 true 39 39 39 39.39 39.39 char39 39 -390 2017-10-01 2017-10-01T00:00 Beijing 390 6 true 390 390 390 390.39 390.39 char390 390 -391 2017-10-01 2017-10-01T00:00 Beijing 391 7 true 391 391 391 391.391 391.391 char391 391 -392 2017-10-01 2017-10-01T00:00 Beijing 392 8 true 392 392 392 392.392 392.392 char392 392 -393 2017-10-01 2017-10-01T00:00 Beijing 393 9 true 393 393 393 393.393 393.393 char393 393 -394 2017-10-01 2017-10-01T00:00 Beijing 394 10 true 394 394 394 394.394 394.394 char394 394 -395 2017-10-01 2017-10-01T00:00 Beijing 395 11 true 395 395 395 395.395 395.395 char395 395 -396 2017-10-01 2017-10-01T00:00 Beijing 396 12 true 396 396 396 396.396 396.396 char396 396 -397 2017-10-01 2017-10-01T00:00 Beijing 397 13 true 397 397 397 397.397 397.397 char397 397 -398 2017-10-01 2017-10-01T00:00 Beijing 398 14 true 398 398 398 398.398 398.398 char398 398 -399 2017-10-01 2017-10-01T00:00 Beijing 399 15 true 399 399 399 399.399 399.399 char399 399 -4 2017-10-01 2017-10-01T00:00 Beijing 4 4 true 4 4 4 4.4 4.4 char4 4 -40 2017-10-01 2017-10-01T00:00 Beijing 40 40 true 40 40 40 40.4 40.4 char40 40 -400 2017-10-01 2017-10-01T00:00 Beijing 400 16 true 400 400 400 400.4 400.4 char400 400 -401 2017-10-01 2017-10-01T00:00 Beijing 401 17 true 401 401 401 401.401 401.401 char401 401 -402 2017-10-01 2017-10-01T00:00 Beijing 402 18 true 402 402 402 402.402 402.402 char402 402 -403 2017-10-01 2017-10-01T00:00 Beijing 403 19 true 403 403 403 403.403 403.403 char403 403 -404 2017-10-01 2017-10-01T00:00 Beijing 404 20 true 404 404 404 404.404 404.404 char404 404 -405 2017-10-01 2017-10-01T00:00 Beijing 405 21 true 405 405 405 405.405 405.405 char405 405 -406 2017-10-01 2017-10-01T00:00 Beijing 406 22 true 406 406 406 406.406 406.406 char406 406 -407 2017-10-01 2017-10-01T00:00 Beijing 407 23 true 407 407 407 407.407 407.407 char407 407 -408 2017-10-01 2017-10-01T00:00 Beijing 408 24 true 408 408 408 408.408 408.408 char408 408 -409 2017-10-01 2017-10-01T00:00 Beijing 409 25 true 409 409 409 409.409 409.409 char409 409 -41 2017-10-01 2017-10-01T00:00 Beijing 41 41 true 41 41 41 41.41 41.41 char41 41 -410 2017-10-01 2017-10-01T00:00 Beijing 410 26 true 410 410 410 410.41 410.41 char410 
410 -411 2017-10-01 2017-10-01T00:00 Beijing 411 27 true 411 411 411 411.411 411.411 char411 411 -412 2017-10-01 2017-10-01T00:00 Beijing 412 28 true 412 412 412 412.412 412.412 char412 412 -413 2017-10-01 2017-10-01T00:00 Beijing 413 29 true 413 413 413 413.413 413.413 char413 413 -414 2017-10-01 2017-10-01T00:00 Beijing 414 30 true 414 414 414 414.414 414.414 char414 414 -415 2017-10-01 2017-10-01T00:00 Beijing 415 31 true 415 415 415 415.415 415.415 char415 415 -416 2017-10-01 2017-10-01T00:00 Beijing 416 32 true 416 416 416 416.416 416.416 char416 416 -417 2017-10-01 2017-10-01T00:00 Beijing 417 33 true 417 417 417 417.417 417.417 char417 417 -418 2017-10-01 2017-10-01T00:00 Beijing 418 34 true 418 418 418 418.418 418.418 char418 418 -419 2017-10-01 2017-10-01T00:00 Beijing 419 35 true 419 419 419 419.419 419.419 char419 419 -42 2017-10-01 2017-10-01T00:00 Beijing 42 42 true 42 42 42 42.42 42.42 char42 42 -420 2017-10-01 2017-10-01T00:00 Beijing 420 36 true 420 420 420 420.42 420.42 char420 420 -421 2017-10-01 2017-10-01T00:00 Beijing 421 37 true 421 421 421 421.421 421.421 char421 421 -422 2017-10-01 2017-10-01T00:00 Beijing 422 38 true 422 422 422 422.422 422.422 char422 422 -423 2017-10-01 2017-10-01T00:00 Beijing 423 39 true 423 423 423 423.423 423.423 char423 423 -424 2017-10-01 2017-10-01T00:00 Beijing 424 40 true 424 424 424 424.424 424.424 char424 424 -425 2017-10-01 2017-10-01T00:00 Beijing 425 41 true 425 425 425 425.425 425.425 char425 425 -426 2017-10-01 2017-10-01T00:00 Beijing 426 42 true 426 426 426 426.426 426.426 char426 426 -427 2017-10-01 2017-10-01T00:00 Beijing 427 43 true 427 427 427 427.427 427.427 char427 427 -428 2017-10-01 2017-10-01T00:00 Beijing 428 44 true 428 428 428 428.428 428.428 char428 428 -429 2017-10-01 2017-10-01T00:00 Beijing 429 45 true 429 429 429 429.429 429.429 char429 429 -43 2017-10-01 2017-10-01T00:00 Beijing 43 43 true 43 43 43 43.43 43.43 char43 43 -430 2017-10-01 2017-10-01T00:00 Beijing 430 46 true 430 430 430 430.43 430.43 char430 430 -431 2017-10-01 2017-10-01T00:00 Beijing 431 47 true 431 431 431 431.431 431.431 char431 431 -432 2017-10-01 2017-10-01T00:00 Beijing 432 48 true 432 432 432 432.432 432.432 char432 432 -433 2017-10-01 2017-10-01T00:00 Beijing 433 49 true 433 433 433 433.433 433.433 char433 433 -434 2017-10-01 2017-10-01T00:00 Beijing 434 50 true 434 434 434 434.434 434.434 char434 434 -435 2017-10-01 2017-10-01T00:00 Beijing 435 51 true 435 435 435 435.435 435.435 char435 435 -436 2017-10-01 2017-10-01T00:00 Beijing 436 52 true 436 436 436 436.436 436.436 char436 436 -437 2017-10-01 2017-10-01T00:00 Beijing 437 53 true 437 437 437 437.437 437.437 char437 437 -438 2017-10-01 2017-10-01T00:00 Beijing 438 54 true 438 438 438 438.438 438.438 char438 438 -439 2017-10-01 2017-10-01T00:00 Beijing 439 55 true 439 439 439 439.439 439.439 char439 439 -44 2017-10-01 2017-10-01T00:00 Beijing 44 44 true 44 44 44 44.44 44.44 char44 44 -440 2017-10-01 2017-10-01T00:00 Beijing 440 56 true 440 440 440 440.44 440.44 char440 440 -441 2017-10-01 2017-10-01T00:00 Beijing 441 57 true 441 441 441 441.441 441.441 char441 441 -442 2017-10-01 2017-10-01T00:00 Beijing 442 58 true 442 442 442 442.442 442.442 char442 442 -443 2017-10-01 2017-10-01T00:00 Beijing 443 59 true 443 443 443 443.443 443.443 char443 443 -444 2017-10-01 2017-10-01T00:00 Beijing 444 60 true 444 444 444 444.444 444.444 char444 444 -445 2017-10-01 2017-10-01T00:00 Beijing 445 61 true 445 445 445 445.445 445.445 char445 445 -446 2017-10-01 2017-10-01T00:00 Beijing 446 62 true 
446 446 446 446.446 446.446 char446 446 -447 2017-10-01 2017-10-01T00:00 Beijing 447 63 true 447 447 447 447.447 447.447 char447 447 -448 2017-10-01 2017-10-01T00:00 Beijing 448 64 true 448 448 448 448.448 448.448 char448 448 -449 2017-10-01 2017-10-01T00:00 Beijing 449 65 true 449 449 449 449.449 449.449 char449 449 -45 2017-10-01 2017-10-01T00:00 Beijing 45 45 true 45 45 45 45.45 45.45 char45 45 -450 2017-10-01 2017-10-01T00:00 Beijing 450 66 true 450 450 450 450.45 450.45 char450 450 -451 2017-10-01 2017-10-01T00:00 Beijing 451 67 true 451 451 451 451.451 451.451 char451 451 -452 2017-10-01 2017-10-01T00:00 Beijing 452 68 true 452 452 452 452.452 452.452 char452 452 -453 2017-10-01 2017-10-01T00:00 Beijing 453 69 true 453 453 453 453.453 453.453 char453 453 -454 2017-10-01 2017-10-01T00:00 Beijing 454 70 true 454 454 454 454.454 454.454 char454 454 -455 2017-10-01 2017-10-01T00:00 Beijing 455 71 true 455 455 455 455.455 455.455 char455 455 -456 2017-10-01 2017-10-01T00:00 Beijing 456 72 true 456 456 456 456.456 456.456 char456 456 -457 2017-10-01 2017-10-01T00:00 Beijing 457 73 true 457 457 457 457.457 457.457 char457 457 -458 2017-10-01 2017-10-01T00:00 Beijing 458 74 true 458 458 458 458.458 458.458 char458 458 -459 2017-10-01 2017-10-01T00:00 Beijing 459 75 true 459 459 459 459.459 459.459 char459 459 -46 2017-10-01 2017-10-01T00:00 Beijing 46 46 true 46 46 46 46.46 46.46 char46 46 -460 2017-10-01 2017-10-01T00:00 Beijing 460 76 true 460 460 460 460.46 460.46 char460 460 -461 2017-10-01 2017-10-01T00:00 Beijing 461 77 true 461 461 461 461.461 461.461 char461 461 -462 2017-10-01 2017-10-01T00:00 Beijing 462 78 true 462 462 462 462.462 462.462 char462 462 -463 2017-10-01 2017-10-01T00:00 Beijing 463 79 true 463 463 463 463.463 463.463 char463 463 -464 2017-10-01 2017-10-01T00:00 Beijing 464 80 true 464 464 464 464.464 464.464 char464 464 -465 2017-10-01 2017-10-01T00:00 Beijing 465 81 true 465 465 465 465.465 465.465 char465 465 -466 2017-10-01 2017-10-01T00:00 Beijing 466 82 true 466 466 466 466.466 466.466 char466 466 -467 2017-10-01 2017-10-01T00:00 Beijing 467 83 true 467 467 467 467.467 467.467 char467 467 -468 2017-10-01 2017-10-01T00:00 Beijing 468 84 true 468 468 468 468.468 468.468 char468 468 -469 2017-10-01 2017-10-01T00:00 Beijing 469 85 true 469 469 469 469.469 469.469 char469 469 -47 2017-10-01 2017-10-01T00:00 Beijing 47 47 true 47 47 47 47.47 47.47 char47 47 -470 2017-10-01 2017-10-01T00:00 Beijing 470 86 true 470 470 470 470.47 470.47 char470 470 -471 2017-10-01 2017-10-01T00:00 Beijing 471 87 true 471 471 471 471.471 471.471 char471 471 -472 2017-10-01 2017-10-01T00:00 Beijing 472 88 true 472 472 472 472.472 472.472 char472 472 -473 2017-10-01 2017-10-01T00:00 Beijing 473 89 true 473 473 473 473.473 473.473 char473 473 -474 2017-10-01 2017-10-01T00:00 Beijing 474 90 true 474 474 474 474.474 474.474 char474 474 -475 2017-10-01 2017-10-01T00:00 Beijing 475 91 true 475 475 475 475.475 475.475 char475 475 -476 2017-10-01 2017-10-01T00:00 Beijing 476 92 true 476 476 476 476.476 476.476 char476 476 -477 2017-10-01 2017-10-01T00:00 Beijing 477 93 true 477 477 477 477.477 477.477 char477 477 -478 2017-10-01 2017-10-01T00:00 Beijing 478 94 true 478 478 478 478.478 478.478 char478 478 -479 2017-10-01 2017-10-01T00:00 Beijing 479 95 true 479 479 479 479.479 479.479 char479 479 -48 2017-10-01 2017-10-01T00:00 Beijing 48 48 true 48 48 48 48.48 48.48 char48 48 -480 2017-10-01 2017-10-01T00:00 Beijing 480 96 true 480 480 480 480.48 480.48 char480 480 -481 2017-10-01 2017-10-01T00:00 
Beijing 481 97 true 481 481 481 481.481 481.481 char481 481 -482 2017-10-01 2017-10-01T00:00 Beijing 482 98 true 482 482 482 482.482 482.482 char482 482 -483 2017-10-01 2017-10-01T00:00 Beijing 483 99 true 483 483 483 483.483 483.483 char483 483 -484 2017-10-01 2017-10-01T00:00 Beijing 484 100 true 484 484 484 484.484 484.484 char484 484 -485 2017-10-01 2017-10-01T00:00 Beijing 485 101 true 485 485 485 485.485 485.485 char485 485 -486 2017-10-01 2017-10-01T00:00 Beijing 486 102 true 486 486 486 486.486 486.486 char486 486 -487 2017-10-01 2017-10-01T00:00 Beijing 487 103 true 487 487 487 487.487 487.487 char487 487 -488 2017-10-01 2017-10-01T00:00 Beijing 488 104 true 488 488 488 488.488 488.488 char488 488 -489 2017-10-01 2017-10-01T00:00 Beijing 489 105 true 489 489 489 489.489 489.489 char489 489 -49 2017-10-01 2017-10-01T00:00 Beijing 49 49 true 49 49 49 49.49 49.49 char49 49 -490 2017-10-01 2017-10-01T00:00 Beijing 490 106 true 490 490 490 490.49 490.49 char490 490 -491 2017-10-01 2017-10-01T00:00 Beijing 491 107 true 491 491 491 491.491 491.491 char491 491 -492 2017-10-01 2017-10-01T00:00 Beijing 492 108 true 492 492 492 492.492 492.492 char492 492 -493 2017-10-01 2017-10-01T00:00 Beijing 493 109 true 493 493 493 493.493 493.493 char493 493 -494 2017-10-01 2017-10-01T00:00 Beijing 494 110 true 494 494 494 494.494 494.494 char494 494 -495 2017-10-01 2017-10-01T00:00 Beijing 495 111 true 495 495 495 495.495 495.495 char495 495 -496 2017-10-01 2017-10-01T00:00 Beijing 496 112 true 496 496 496 496.496 496.496 char496 496 -497 2017-10-01 2017-10-01T00:00 Beijing 497 113 true 497 497 497 497.497 497.497 char497 497 -498 2017-10-01 2017-10-01T00:00 Beijing 498 114 true 498 498 498 498.498 498.498 char498 498 -499 2017-10-01 2017-10-01T00:00 Beijing 499 115 true 499 499 499 499.499 499.499 char499 499 -5 2017-10-01 2017-10-01T00:00 Beijing 5 5 true 5 5 5 5.5 5.5 char5 5 -50 2017-10-01 2017-10-01T00:00 Beijing 50 50 true 50 50 50 50.5 50.5 char50 50 -500 2017-10-01 2017-10-01T00:00 Beijing 500 116 true 500 500 500 500.5 500.5 char500 500 -501 2017-10-01 2017-10-01T00:00 Beijing 501 117 true 501 501 501 501.501 501.501 char501 501 -502 2017-10-01 2017-10-01T00:00 Beijing 502 118 true 502 502 502 502.502 502.502 char502 502 -503 2017-10-01 2017-10-01T00:00 Beijing 503 119 true 503 503 503 503.503 503.503 char503 503 -504 2017-10-01 2017-10-01T00:00 Beijing 504 120 true 504 504 504 504.504 504.504 char504 504 -505 2017-10-01 2017-10-01T00:00 Beijing 505 121 true 505 505 505 505.505 505.505 char505 505 -506 2017-10-01 2017-10-01T00:00 Beijing 506 122 true 506 506 506 506.506 506.506 char506 506 -507 2017-10-01 2017-10-01T00:00 Beijing 507 123 true 507 507 507 507.507 507.507 char507 507 -508 2017-10-01 2017-10-01T00:00 Beijing 508 124 true 508 508 508 508.508 508.508 char508 508 -509 2017-10-01 2017-10-01T00:00 Beijing 509 125 true 509 509 509 509.509 509.509 char509 509 -51 2017-10-01 2017-10-01T00:00 Beijing 51 51 true 51 51 51 51.51 51.51 char51 51 -510 2017-10-01 2017-10-01T00:00 Beijing 510 126 true 510 510 510 510.51 510.51 char510 510 -511 2017-10-01 2017-10-01T00:00 Beijing 511 127 true 511 511 511 511.511 511.511 char511 511 -512 2017-10-01 2017-10-01T00:00 Beijing 512 0 true 512 512 512 512.512 512.512 char512 512 -513 2017-10-01 2017-10-01T00:00 Beijing 513 1 true 513 513 513 513.513 513.513 char513 513 -514 2017-10-01 2017-10-01T00:00 Beijing 514 2 true 514 514 514 514.514 514.514 char514 514 -515 2017-10-01 2017-10-01T00:00 Beijing 515 3 true 515 515 515 515.515 515.515 char515 515 
-516 2017-10-01 2017-10-01T00:00 Beijing 516 4 true 516 516 516 516.516 516.516 char516 516 -517 2017-10-01 2017-10-01T00:00 Beijing 517 5 true 517 517 517 517.517 517.517 char517 517 -518 2017-10-01 2017-10-01T00:00 Beijing 518 6 true 518 518 518 518.518 518.518 char518 518 -519 2017-10-01 2017-10-01T00:00 Beijing 519 7 true 519 519 519 519.519 519.519 char519 519 -52 2017-10-01 2017-10-01T00:00 Beijing 52 52 true 52 52 52 52.52 52.52 char52 52 -520 2017-10-01 2017-10-01T00:00 Beijing 520 8 true 520 520 520 520.52 520.52 char520 520 -521 2017-10-01 2017-10-01T00:00 Beijing 521 9 true 521 521 521 521.521 521.521 char521 521 -522 2017-10-01 2017-10-01T00:00 Beijing 522 10 true 522 522 522 522.522 522.522 char522 522 -523 2017-10-01 2017-10-01T00:00 Beijing 523 11 true 523 523 523 523.523 523.523 char523 523 -524 2017-10-01 2017-10-01T00:00 Beijing 524 12 true 524 524 524 524.524 524.524 char524 524 -525 2017-10-01 2017-10-01T00:00 Beijing 525 13 true 525 525 525 525.525 525.525 char525 525 -526 2017-10-01 2017-10-01T00:00 Beijing 526 14 true 526 526 526 526.526 526.526 char526 526 -527 2017-10-01 2017-10-01T00:00 Beijing 527 15 true 527 527 527 527.527 527.527 char527 527 -528 2017-10-01 2017-10-01T00:00 Beijing 528 16 true 528 528 528 528.528 528.528 char528 528 -529 2017-10-01 2017-10-01T00:00 Beijing 529 17 true 529 529 529 529.529 529.529 char529 529 -53 2017-10-01 2017-10-01T00:00 Beijing 53 53 true 53 53 53 53.53 53.53 char53 53 -530 2017-10-01 2017-10-01T00:00 Beijing 530 18 true 530 530 530 530.53 530.53 char530 530 -531 2017-10-01 2017-10-01T00:00 Beijing 531 19 true 531 531 531 531.531 531.531 char531 531 -532 2017-10-01 2017-10-01T00:00 Beijing 532 20 true 532 532 532 532.532 532.532 char532 532 -533 2017-10-01 2017-10-01T00:00 Beijing 533 21 true 533 533 533 533.533 533.533 char533 533 -534 2017-10-01 2017-10-01T00:00 Beijing 534 22 true 534 534 534 534.534 534.534 char534 534 -535 2017-10-01 2017-10-01T00:00 Beijing 535 23 true 535 535 535 535.535 535.535 char535 535 -536 2017-10-01 2017-10-01T00:00 Beijing 536 24 true 536 536 536 536.536 536.536 char536 536 -537 2017-10-01 2017-10-01T00:00 Beijing 537 25 true 537 537 537 537.537 537.537 char537 537 -538 2017-10-01 2017-10-01T00:00 Beijing 538 26 true 538 538 538 538.538 538.538 char538 538 -539 2017-10-01 2017-10-01T00:00 Beijing 539 27 true 539 539 539 539.539 539.539 char539 539 -54 2017-10-01 2017-10-01T00:00 Beijing 54 54 true 54 54 54 54.54 54.54 char54 54 -540 2017-10-01 2017-10-01T00:00 Beijing 540 28 true 540 540 540 540.54 540.54 char540 540 -541 2017-10-01 2017-10-01T00:00 Beijing 541 29 true 541 541 541 541.541 541.541 char541 541 -542 2017-10-01 2017-10-01T00:00 Beijing 542 30 true 542 542 542 542.542 542.542 char542 542 -543 2017-10-01 2017-10-01T00:00 Beijing 543 31 true 543 543 543 543.543 543.543 char543 543 -544 2017-10-01 2017-10-01T00:00 Beijing 544 32 true 544 544 544 544.544 544.544 char544 544 -545 2017-10-01 2017-10-01T00:00 Beijing 545 33 true 545 545 545 545.545 545.545 char545 545 -546 2017-10-01 2017-10-01T00:00 Beijing 546 34 true 546 546 546 546.546 546.546 char546 546 -547 2017-10-01 2017-10-01T00:00 Beijing 547 35 true 547 547 547 547.547 547.547 char547 547 -548 2017-10-01 2017-10-01T00:00 Beijing 548 36 true 548 548 548 548.548 548.548 char548 548 -549 2017-10-01 2017-10-01T00:00 Beijing 549 37 true 549 549 549 549.549 549.549 char549 549 -55 2017-10-01 2017-10-01T00:00 Beijing 55 55 true 55 55 55 55.55 55.55 char55 55 -550 2017-10-01 2017-10-01T00:00 Beijing 550 38 true 550 550 550 550.55 
550.55 char550 550 -551 2017-10-01 2017-10-01T00:00 Beijing 551 39 true 551 551 551 551.551 551.551 char551 551 -552 2017-10-01 2017-10-01T00:00 Beijing 552 40 true 552 552 552 552.552 552.552 char552 552 -553 2017-10-01 2017-10-01T00:00 Beijing 553 41 true 553 553 553 553.553 553.553 char553 553 -554 2017-10-01 2017-10-01T00:00 Beijing 554 42 true 554 554 554 554.554 554.554 char554 554 -555 2017-10-01 2017-10-01T00:00 Beijing 555 43 true 555 555 555 555.555 555.555 char555 555 -556 2017-10-01 2017-10-01T00:00 Beijing 556 44 true 556 556 556 556.556 556.556 char556 556 -557 2017-10-01 2017-10-01T00:00 Beijing 557 45 true 557 557 557 557.557 557.557 char557 557 -558 2017-10-01 2017-10-01T00:00 Beijing 558 46 true 558 558 558 558.558 558.558 char558 558 -559 2017-10-01 2017-10-01T00:00 Beijing 559 47 true 559 559 559 559.559 559.559 char559 559 -56 2017-10-01 2017-10-01T00:00 Beijing 56 56 true 56 56 56 56.56 56.56 char56 56 -560 2017-10-01 2017-10-01T00:00 Beijing 560 48 true 560 560 560 560.56 560.56 char560 560 -561 2017-10-01 2017-10-01T00:00 Beijing 561 49 true 561 561 561 561.561 561.561 char561 561 -562 2017-10-01 2017-10-01T00:00 Beijing 562 50 true 562 562 562 562.562 562.562 char562 562 -563 2017-10-01 2017-10-01T00:00 Beijing 563 51 true 563 563 563 563.563 563.563 char563 563 -564 2017-10-01 2017-10-01T00:00 Beijing 564 52 true 564 564 564 564.564 564.564 char564 564 -565 2017-10-01 2017-10-01T00:00 Beijing 565 53 true 565 565 565 565.565 565.565 char565 565 -566 2017-10-01 2017-10-01T00:00 Beijing 566 54 true 566 566 566 566.566 566.566 char566 566 -567 2017-10-01 2017-10-01T00:00 Beijing 567 55 true 567 567 567 567.567 567.567 char567 567 -568 2017-10-01 2017-10-01T00:00 Beijing 568 56 true 568 568 568 568.568 568.568 char568 568 -569 2017-10-01 2017-10-01T00:00 Beijing 569 57 true 569 569 569 569.569 569.569 char569 569 -57 2017-10-01 2017-10-01T00:00 Beijing 57 57 true 57 57 57 57.57 57.57 char57 57 -570 2017-10-01 2017-10-01T00:00 Beijing 570 58 true 570 570 570 570.57 570.57 char570 570 -571 2017-10-01 2017-10-01T00:00 Beijing 571 59 true 571 571 571 571.571 571.571 char571 571 -572 2017-10-01 2017-10-01T00:00 Beijing 572 60 true 572 572 572 572.572 572.572 char572 572 -573 2017-10-01 2017-10-01T00:00 Beijing 573 61 true 573 573 573 573.573 573.573 char573 573 -574 2017-10-01 2017-10-01T00:00 Beijing 574 62 true 574 574 574 574.574 574.574 char574 574 -575 2017-10-01 2017-10-01T00:00 Beijing 575 63 true 575 575 575 575.575 575.575 char575 575 -576 2017-10-01 2017-10-01T00:00 Beijing 576 64 true 576 576 576 576.576 576.576 char576 576 -577 2017-10-01 2017-10-01T00:00 Beijing 577 65 true 577 577 577 577.577 577.577 char577 577 -578 2017-10-01 2017-10-01T00:00 Beijing 578 66 true 578 578 578 578.578 578.578 char578 578 -579 2017-10-01 2017-10-01T00:00 Beijing 579 67 true 579 579 579 579.579 579.579 char579 579 -58 2017-10-01 2017-10-01T00:00 Beijing 58 58 true 58 58 58 58.58 58.58 char58 58 -580 2017-10-01 2017-10-01T00:00 Beijing 580 68 true 580 580 580 580.58 580.58 char580 580 -581 2017-10-01 2017-10-01T00:00 Beijing 581 69 true 581 581 581 581.581 581.581 char581 581 -582 2017-10-01 2017-10-01T00:00 Beijing 582 70 true 582 582 582 582.582 582.582 char582 582 -583 2017-10-01 2017-10-01T00:00 Beijing 583 71 true 583 583 583 583.583 583.583 char583 583 -584 2017-10-01 2017-10-01T00:00 Beijing 584 72 true 584 584 584 584.584 584.584 char584 584 -585 2017-10-01 2017-10-01T00:00 Beijing 585 73 true 585 585 585 585.585 585.585 char585 585 -586 2017-10-01 2017-10-01T00:00 Beijing 
586 74 true 586 586 586 586.586 586.586 char586 586 -587 2017-10-01 2017-10-01T00:00 Beijing 587 75 true 587 587 587 587.587 587.587 char587 587 -588 2017-10-01 2017-10-01T00:00 Beijing 588 76 true 588 588 588 588.588 588.588 char588 588 -589 2017-10-01 2017-10-01T00:00 Beijing 589 77 true 589 589 589 589.589 589.589 char589 589 -59 2017-10-01 2017-10-01T00:00 Beijing 59 59 true 59 59 59 59.59 59.59 char59 59 -590 2017-10-01 2017-10-01T00:00 Beijing 590 78 true 590 590 590 590.59 590.59 char590 590 -591 2017-10-01 2017-10-01T00:00 Beijing 591 79 true 591 591 591 591.591 591.591 char591 591 -592 2017-10-01 2017-10-01T00:00 Beijing 592 80 true 592 592 592 592.592 592.592 char592 592 -593 2017-10-01 2017-10-01T00:00 Beijing 593 81 true 593 593 593 593.593 593.593 char593 593 -594 2017-10-01 2017-10-01T00:00 Beijing 594 82 true 594 594 594 594.594 594.594 char594 594 -595 2017-10-01 2017-10-01T00:00 Beijing 595 83 true 595 595 595 595.595 595.595 char595 595 -596 2017-10-01 2017-10-01T00:00 Beijing 596 84 true 596 596 596 596.596 596.596 char596 596 -597 2017-10-01 2017-10-01T00:00 Beijing 597 85 true 597 597 597 597.597 597.597 char597 597 -598 2017-10-01 2017-10-01T00:00 Beijing 598 86 true 598 598 598 598.598 598.598 char598 598 -599 2017-10-01 2017-10-01T00:00 Beijing 599 87 true 599 599 599 599.599 599.599 char599 599 -6 2017-10-01 2017-10-01T00:00 Beijing 6 6 true 6 6 6 6.6 6.6 char6 6 -60 2017-10-01 2017-10-01T00:00 Beijing 60 60 true 60 60 60 60.6 60.6 char60 60 -600 2017-10-01 2017-10-01T00:00 Beijing 600 88 true 600 600 600 600.6 600.6 char600 600 -601 2017-10-01 2017-10-01T00:00 Beijing 601 89 true 601 601 601 601.601 601.601 char601 601 -602 2017-10-01 2017-10-01T00:00 Beijing 602 90 true 602 602 602 602.602 602.602 char602 602 -603 2017-10-01 2017-10-01T00:00 Beijing 603 91 true 603 603 603 603.603 603.603 char603 603 -604 2017-10-01 2017-10-01T00:00 Beijing 604 92 true 604 604 604 604.604 604.604 char604 604 -605 2017-10-01 2017-10-01T00:00 Beijing 605 93 true 605 605 605 605.605 605.605 char605 605 -606 2017-10-01 2017-10-01T00:00 Beijing 606 94 true 606 606 606 606.606 606.606 char606 606 -607 2017-10-01 2017-10-01T00:00 Beijing 607 95 true 607 607 607 607.607 607.607 char607 607 -608 2017-10-01 2017-10-01T00:00 Beijing 608 96 true 608 608 608 608.608 608.608 char608 608 -609 2017-10-01 2017-10-01T00:00 Beijing 609 97 true 609 609 609 609.609 609.609 char609 609 -61 2017-10-01 2017-10-01T00:00 Beijing 61 61 true 61 61 61 61.61 61.61 char61 61 -610 2017-10-01 2017-10-01T00:00 Beijing 610 98 true 610 610 610 610.61 610.61 char610 610 -611 2017-10-01 2017-10-01T00:00 Beijing 611 99 true 611 611 611 611.611 611.611 char611 611 -612 2017-10-01 2017-10-01T00:00 Beijing 612 100 true 612 612 612 612.612 612.612 char612 612 -613 2017-10-01 2017-10-01T00:00 Beijing 613 101 true 613 613 613 613.613 613.613 char613 613 -614 2017-10-01 2017-10-01T00:00 Beijing 614 102 true 614 614 614 614.614 614.614 char614 614 -615 2017-10-01 2017-10-01T00:00 Beijing 615 103 true 615 615 615 615.615 615.615 char615 615 -616 2017-10-01 2017-10-01T00:00 Beijing 616 104 true 616 616 616 616.616 616.616 char616 616 -617 2017-10-01 2017-10-01T00:00 Beijing 617 105 true 617 617 617 617.617 617.617 char617 617 -618 2017-10-01 2017-10-01T00:00 Beijing 618 106 true 618 618 618 618.618 618.618 char618 618 -619 2017-10-01 2017-10-01T00:00 Beijing 619 107 true 619 619 619 619.619 619.619 char619 619 -62 2017-10-01 2017-10-01T00:00 Beijing 62 62 true 62 62 62 62.62 62.62 char62 62 -620 2017-10-01 2017-10-01T00:00 
Beijing 620 108 true 620 620 620 620.62 620.62 char620 620 -621 2017-10-01 2017-10-01T00:00 Beijing 621 109 true 621 621 621 621.621 621.621 char621 621 -622 2017-10-01 2017-10-01T00:00 Beijing 622 110 true 622 622 622 622.622 622.622 char622 622 -623 2017-10-01 2017-10-01T00:00 Beijing 623 111 true 623 623 623 623.623 623.623 char623 623 -624 2017-10-01 2017-10-01T00:00 Beijing 624 112 true 624 624 624 624.624 624.624 char624 624 -625 2017-10-01 2017-10-01T00:00 Beijing 625 113 true 625 625 625 625.625 625.625 char625 625 -626 2017-10-01 2017-10-01T00:00 Beijing 626 114 true 626 626 626 626.626 626.626 char626 626 -627 2017-10-01 2017-10-01T00:00 Beijing 627 115 true 627 627 627 627.627 627.627 char627 627 -628 2017-10-01 2017-10-01T00:00 Beijing 628 116 true 628 628 628 628.628 628.628 char628 628 -629 2017-10-01 2017-10-01T00:00 Beijing 629 117 true 629 629 629 629.629 629.629 char629 629 -63 2017-10-01 2017-10-01T00:00 Beijing 63 63 true 63 63 63 63.63 63.63 char63 63 -630 2017-10-01 2017-10-01T00:00 Beijing 630 118 true 630 630 630 630.63 630.63 char630 630 -631 2017-10-01 2017-10-01T00:00 Beijing 631 119 true 631 631 631 631.631 631.631 char631 631 -632 2017-10-01 2017-10-01T00:00 Beijing 632 120 true 632 632 632 632.632 632.632 char632 632 -633 2017-10-01 2017-10-01T00:00 Beijing 633 121 true 633 633 633 633.633 633.633 char633 633 -634 2017-10-01 2017-10-01T00:00 Beijing 634 122 true 634 634 634 634.634 634.634 char634 634 -635 2017-10-01 2017-10-01T00:00 Beijing 635 123 true 635 635 635 635.635 635.635 char635 635 -636 2017-10-01 2017-10-01T00:00 Beijing 636 124 true 636 636 636 636.636 636.636 char636 636 -637 2017-10-01 2017-10-01T00:00 Beijing 637 125 true 637 637 637 637.637 637.637 char637 637 -638 2017-10-01 2017-10-01T00:00 Beijing 638 126 true 638 638 638 638.638 638.638 char638 638 -639 2017-10-01 2017-10-01T00:00 Beijing 639 127 true 639 639 639 639.639 639.639 char639 639 -64 2017-10-01 2017-10-01T00:00 Beijing 64 64 true 64 64 64 64.64 64.64 char64 64 -640 2017-10-01 2017-10-01T00:00 Beijing 640 0 true 640 640 640 640.64 640.64 char640 640 -641 2017-10-01 2017-10-01T00:00 Beijing 641 1 true 641 641 641 641.641 641.641 char641 641 -642 2017-10-01 2017-10-01T00:00 Beijing 642 2 true 642 642 642 642.642 642.642 char642 642 -643 2017-10-01 2017-10-01T00:00 Beijing 643 3 true 643 643 643 643.643 643.643 char643 643 -644 2017-10-01 2017-10-01T00:00 Beijing 644 4 true 644 644 644 644.644 644.644 char644 644 -645 2017-10-01 2017-10-01T00:00 Beijing 645 5 true 645 645 645 645.645 645.645 char645 645 -646 2017-10-01 2017-10-01T00:00 Beijing 646 6 true 646 646 646 646.646 646.646 char646 646 -647 2017-10-01 2017-10-01T00:00 Beijing 647 7 true 647 647 647 647.647 647.647 char647 647 -648 2017-10-01 2017-10-01T00:00 Beijing 648 8 true 648 648 648 648.648 648.648 char648 648 -649 2017-10-01 2017-10-01T00:00 Beijing 649 9 true 649 649 649 649.649 649.649 char649 649 -65 2017-10-01 2017-10-01T00:00 Beijing 65 65 true 65 65 65 65.65 65.65 char65 65 -650 2017-10-01 2017-10-01T00:00 Beijing 650 10 true 650 650 650 650.65 650.65 char650 650 -651 2017-10-01 2017-10-01T00:00 Beijing 651 11 true 651 651 651 651.651 651.651 char651 651 -652 2017-10-01 2017-10-01T00:00 Beijing 652 12 true 652 652 652 652.652 652.652 char652 652 -653 2017-10-01 2017-10-01T00:00 Beijing 653 13 true 653 653 653 653.653 653.653 char653 653 -654 2017-10-01 2017-10-01T00:00 Beijing 654 14 true 654 654 654 654.654 654.654 char654 654 -655 2017-10-01 2017-10-01T00:00 Beijing 655 15 true 655 655 655 655.655 655.655 
char655 655 -656 2017-10-01 2017-10-01T00:00 Beijing 656 16 true 656 656 656 656.656 656.656 char656 656 -657 2017-10-01 2017-10-01T00:00 Beijing 657 17 true 657 657 657 657.657 657.657 char657 657 -658 2017-10-01 2017-10-01T00:00 Beijing 658 18 true 658 658 658 658.658 658.658 char658 658 -659 2017-10-01 2017-10-01T00:00 Beijing 659 19 true 659 659 659 659.659 659.659 char659 659 -66 2017-10-01 2017-10-01T00:00 Beijing 66 66 true 66 66 66 66.66 66.66 char66 66 -660 2017-10-01 2017-10-01T00:00 Beijing 660 20 true 660 660 660 660.66 660.66 char660 660 -661 2017-10-01 2017-10-01T00:00 Beijing 661 21 true 661 661 661 661.661 661.661 char661 661 -662 2017-10-01 2017-10-01T00:00 Beijing 662 22 true 662 662 662 662.662 662.662 char662 662 -663 2017-10-01 2017-10-01T00:00 Beijing 663 23 true 663 663 663 663.663 663.663 char663 663 -664 2017-10-01 2017-10-01T00:00 Beijing 664 24 true 664 664 664 664.664 664.664 char664 664 -665 2017-10-01 2017-10-01T00:00 Beijing 665 25 true 665 665 665 665.665 665.665 char665 665 -666 2017-10-01 2017-10-01T00:00 Beijing 666 26 true 666 666 666 666.666 666.666 char666 666 -667 2017-10-01 2017-10-01T00:00 Beijing 667 27 true 667 667 667 667.667 667.667 char667 667 -668 2017-10-01 2017-10-01T00:00 Beijing 668 28 true 668 668 668 668.668 668.668 char668 668 -669 2017-10-01 2017-10-01T00:00 Beijing 669 29 true 669 669 669 669.669 669.669 char669 669 -67 2017-10-01 2017-10-01T00:00 Beijing 67 67 true 67 67 67 67.67 67.67 char67 67 -670 2017-10-01 2017-10-01T00:00 Beijing 670 30 true 670 670 670 670.67 670.67 char670 670 -671 2017-10-01 2017-10-01T00:00 Beijing 671 31 true 671 671 671 671.671 671.671 char671 671 -672 2017-10-01 2017-10-01T00:00 Beijing 672 32 true 672 672 672 672.672 672.672 char672 672 -673 2017-10-01 2017-10-01T00:00 Beijing 673 33 true 673 673 673 673.673 673.673 char673 673 -674 2017-10-01 2017-10-01T00:00 Beijing 674 34 true 674 674 674 674.674 674.674 char674 674 -675 2017-10-01 2017-10-01T00:00 Beijing 675 35 true 675 675 675 675.675 675.675 char675 675 -676 2017-10-01 2017-10-01T00:00 Beijing 676 36 true 676 676 676 676.676 676.676 char676 676 -677 2017-10-01 2017-10-01T00:00 Beijing 677 37 true 677 677 677 677.677 677.677 char677 677 -678 2017-10-01 2017-10-01T00:00 Beijing 678 38 true 678 678 678 678.678 678.678 char678 678 -679 2017-10-01 2017-10-01T00:00 Beijing 679 39 true 679 679 679 679.679 679.679 char679 679 -68 2017-10-01 2017-10-01T00:00 Beijing 68 68 true 68 68 68 68.68 68.68 char68 68 -680 2017-10-01 2017-10-01T00:00 Beijing 680 40 true 680 680 680 680.68 680.68 char680 680 -681 2017-10-01 2017-10-01T00:00 Beijing 681 41 true 681 681 681 681.681 681.681 char681 681 -682 2017-10-01 2017-10-01T00:00 Beijing 682 42 true 682 682 682 682.682 682.682 char682 682 -683 2017-10-01 2017-10-01T00:00 Beijing 683 43 true 683 683 683 683.683 683.683 char683 683 -684 2017-10-01 2017-10-01T00:00 Beijing 684 44 true 684 684 684 684.684 684.684 char684 684 -685 2017-10-01 2017-10-01T00:00 Beijing 685 45 true 685 685 685 685.685 685.685 char685 685 -686 2017-10-01 2017-10-01T00:00 Beijing 686 46 true 686 686 686 686.686 686.686 char686 686 -687 2017-10-01 2017-10-01T00:00 Beijing 687 47 true 687 687 687 687.687 687.687 char687 687 -688 2017-10-01 2017-10-01T00:00 Beijing 688 48 true 688 688 688 688.688 688.688 char688 688 -689 2017-10-01 2017-10-01T00:00 Beijing 689 49 true 689 689 689 689.689 689.689 char689 689 -69 2017-10-01 2017-10-01T00:00 Beijing 69 69 true 69 69 69 69.69 69.69 char69 69 -690 2017-10-01 2017-10-01T00:00 Beijing 690 50 true 690 
690 690 690.69 690.69 char690 690 -691 2017-10-01 2017-10-01T00:00 Beijing 691 51 true 691 691 691 691.691 691.691 char691 691 -692 2017-10-01 2017-10-01T00:00 Beijing 692 52 true 692 692 692 692.692 692.692 char692 692 -693 2017-10-01 2017-10-01T00:00 Beijing 693 53 true 693 693 693 693.693 693.693 char693 693 -694 2017-10-01 2017-10-01T00:00 Beijing 694 54 true 694 694 694 694.694 694.694 char694 694 -695 2017-10-01 2017-10-01T00:00 Beijing 695 55 true 695 695 695 695.695 695.695 char695 695 -696 2017-10-01 2017-10-01T00:00 Beijing 696 56 true 696 696 696 696.696 696.696 char696 696 -697 2017-10-01 2017-10-01T00:00 Beijing 697 57 true 697 697 697 697.697 697.697 char697 697 -698 2017-10-01 2017-10-01T00:00 Beijing 698 58 true 698 698 698 698.698 698.698 char698 698 -699 2017-10-01 2017-10-01T00:00 Beijing 699 59 true 699 699 699 699.699 699.699 char699 699 -7 2017-10-01 2017-10-01T00:00 Beijing 7 7 true 7 7 7 7.7 7.7 char7 7 -70 2017-10-01 2017-10-01T00:00 Beijing 70 70 true 70 70 70 70.7 70.7 char70 70 -700 2017-10-01 2017-10-01T00:00 Beijing 700 60 true 700 700 700 700.7 700.7 char700 700 -701 2017-10-01 2017-10-01T00:00 Beijing 701 61 true 701 701 701 701.701 701.701 char701 701 -702 2017-10-01 2017-10-01T00:00 Beijing 702 62 true 702 702 702 702.702 702.702 char702 702 -703 2017-10-01 2017-10-01T00:00 Beijing 703 63 true 703 703 703 703.703 703.703 char703 703 -704 2017-10-01 2017-10-01T00:00 Beijing 704 64 true 704 704 704 704.704 704.704 char704 704 -705 2017-10-01 2017-10-01T00:00 Beijing 705 65 true 705 705 705 705.705 705.705 char705 705 -706 2017-10-01 2017-10-01T00:00 Beijing 706 66 true 706 706 706 706.706 706.706 char706 706 -707 2017-10-01 2017-10-01T00:00 Beijing 707 67 true 707 707 707 707.707 707.707 char707 707 -708 2017-10-01 2017-10-01T00:00 Beijing 708 68 true 708 708 708 708.708 708.708 char708 708 -709 2017-10-01 2017-10-01T00:00 Beijing 709 69 true 709 709 709 709.709 709.709 char709 709 -71 2017-10-01 2017-10-01T00:00 Beijing 71 71 true 71 71 71 71.71 71.71 char71 71 -710 2017-10-01 2017-10-01T00:00 Beijing 710 70 true 710 710 710 710.71 710.71 char710 710 -711 2017-10-01 2017-10-01T00:00 Beijing 711 71 true 711 711 711 711.711 711.711 char711 711 -712 2017-10-01 2017-10-01T00:00 Beijing 712 72 true 712 712 712 712.712 712.712 char712 712 -713 2017-10-01 2017-10-01T00:00 Beijing 713 73 true 713 713 713 713.713 713.713 char713 713 -714 2017-10-01 2017-10-01T00:00 Beijing 714 74 true 714 714 714 714.714 714.714 char714 714 -715 2017-10-01 2017-10-01T00:00 Beijing 715 75 true 715 715 715 715.715 715.715 char715 715 -716 2017-10-01 2017-10-01T00:00 Beijing 716 76 true 716 716 716 716.716 716.716 char716 716 -717 2017-10-01 2017-10-01T00:00 Beijing 717 77 true 717 717 717 717.717 717.717 char717 717 -718 2017-10-01 2017-10-01T00:00 Beijing 718 78 true 718 718 718 718.718 718.718 char718 718 -719 2017-10-01 2017-10-01T00:00 Beijing 719 79 true 719 719 719 719.719 719.719 char719 719 -72 2017-10-01 2017-10-01T00:00 Beijing 72 72 true 72 72 72 72.72 72.72 char72 72 -720 2017-10-01 2017-10-01T00:00 Beijing 720 80 true 720 720 720 720.72 720.72 char720 720 -721 2017-10-01 2017-10-01T00:00 Beijing 721 81 true 721 721 721 721.721 721.721 char721 721 -722 2017-10-01 2017-10-01T00:00 Beijing 722 82 true 722 722 722 722.722 722.722 char722 722 -723 2017-10-01 2017-10-01T00:00 Beijing 723 83 true 723 723 723 723.723 723.723 char723 723 -724 2017-10-01 2017-10-01T00:00 Beijing 724 84 true 724 724 724 724.724 724.724 char724 724 -725 2017-10-01 2017-10-01T00:00 Beijing 725 85 true 
725 725 725 725.725 725.725 char725 725 -726 2017-10-01 2017-10-01T00:00 Beijing 726 86 true 726 726 726 726.726 726.726 char726 726 -727 2017-10-01 2017-10-01T00:00 Beijing 727 87 true 727 727 727 727.727 727.727 char727 727 -728 2017-10-01 2017-10-01T00:00 Beijing 728 88 true 728 728 728 728.728 728.728 char728 728 -729 2017-10-01 2017-10-01T00:00 Beijing 729 89 true 729 729 729 729.729 729.729 char729 729 -73 2017-10-01 2017-10-01T00:00 Beijing 73 73 true 73 73 73 73.73 73.73 char73 73 -730 2017-10-01 2017-10-01T00:00 Beijing 730 90 true 730 730 730 730.73 730.73 char730 730 -731 2017-10-01 2017-10-01T00:00 Beijing 731 91 true 731 731 731 731.731 731.731 char731 731 -732 2017-10-01 2017-10-01T00:00 Beijing 732 92 true 732 732 732 732.732 732.732 char732 732 -733 2017-10-01 2017-10-01T00:00 Beijing 733 93 true 733 733 733 733.733 733.733 char733 733 -734 2017-10-01 2017-10-01T00:00 Beijing 734 94 true 734 734 734 734.734 734.734 char734 734 -735 2017-10-01 2017-10-01T00:00 Beijing 735 95 true 735 735 735 735.735 735.735 char735 735 -736 2017-10-01 2017-10-01T00:00 Beijing 736 96 true 736 736 736 736.736 736.736 char736 736 -737 2017-10-01 2017-10-01T00:00 Beijing 737 97 true 737 737 737 737.737 737.737 char737 737 -738 2017-10-01 2017-10-01T00:00 Beijing 738 98 true 738 738 738 738.738 738.738 char738 738 -739 2017-10-01 2017-10-01T00:00 Beijing 739 99 true 739 739 739 739.739 739.739 char739 739 -74 2017-10-01 2017-10-01T00:00 Beijing 74 74 true 74 74 74 74.74 74.74 char74 74 -740 2017-10-01 2017-10-01T00:00 Beijing 740 100 true 740 740 740 740.74 740.74 char740 740 -741 2017-10-01 2017-10-01T00:00 Beijing 741 101 true 741 741 741 741.741 741.741 char741 741 -742 2017-10-01 2017-10-01T00:00 Beijing 742 102 true 742 742 742 742.742 742.742 char742 742 -743 2017-10-01 2017-10-01T00:00 Beijing 743 103 true 743 743 743 743.743 743.743 char743 743 -744 2017-10-01 2017-10-01T00:00 Beijing 744 104 true 744 744 744 744.744 744.744 char744 744 -745 2017-10-01 2017-10-01T00:00 Beijing 745 105 true 745 745 745 745.745 745.745 char745 745 -746 2017-10-01 2017-10-01T00:00 Beijing 746 106 true 746 746 746 746.746 746.746 char746 746 -747 2017-10-01 2017-10-01T00:00 Beijing 747 107 true 747 747 747 747.747 747.747 char747 747 -748 2017-10-01 2017-10-01T00:00 Beijing 748 108 true 748 748 748 748.748 748.748 char748 748 -749 2017-10-01 2017-10-01T00:00 Beijing 749 109 true 749 749 749 749.749 749.749 char749 749 -75 2017-10-01 2017-10-01T00:00 Beijing 75 75 true 75 75 75 75.75 75.75 char75 75 -750 2017-10-01 2017-10-01T00:00 Beijing 750 110 true 750 750 750 750.75 750.75 char750 750 -751 2017-10-01 2017-10-01T00:00 Beijing 751 111 true 751 751 751 751.751 751.751 char751 751 -752 2017-10-01 2017-10-01T00:00 Beijing 752 112 true 752 752 752 752.752 752.752 char752 752 -753 2017-10-01 2017-10-01T00:00 Beijing 753 113 true 753 753 753 753.753 753.753 char753 753 -754 2017-10-01 2017-10-01T00:00 Beijing 754 114 true 754 754 754 754.754 754.754 char754 754 -755 2017-10-01 2017-10-01T00:00 Beijing 755 115 true 755 755 755 755.755 755.755 char755 755 -756 2017-10-01 2017-10-01T00:00 Beijing 756 116 true 756 756 756 756.756 756.756 char756 756 -757 2017-10-01 2017-10-01T00:00 Beijing 757 117 true 757 757 757 757.757 757.757 char757 757 -758 2017-10-01 2017-10-01T00:00 Beijing 758 118 true 758 758 758 758.758 758.758 char758 758 -759 2017-10-01 2017-10-01T00:00 Beijing 759 119 true 759 759 759 759.759 759.759 char759 759 -76 2017-10-01 2017-10-01T00:00 Beijing 76 76 true 76 76 76 76.76 76.76 char76 76 -760 
2017-10-01 2017-10-01T00:00 Beijing 760 120 true 760 760 760 760.76 760.76 char760 760 -761 2017-10-01 2017-10-01T00:00 Beijing 761 121 true 761 761 761 761.761 761.761 char761 761 -762 2017-10-01 2017-10-01T00:00 Beijing 762 122 true 762 762 762 762.762 762.762 char762 762 -763 2017-10-01 2017-10-01T00:00 Beijing 763 123 true 763 763 763 763.763 763.763 char763 763 -764 2017-10-01 2017-10-01T00:00 Beijing 764 124 true 764 764 764 764.764 764.764 char764 764 -765 2017-10-01 2017-10-01T00:00 Beijing 765 125 true 765 765 765 765.765 765.765 char765 765 -766 2017-10-01 2017-10-01T00:00 Beijing 766 126 true 766 766 766 766.766 766.766 char766 766 -767 2017-10-01 2017-10-01T00:00 Beijing 767 127 true 767 767 767 767.767 767.767 char767 767 -768 2017-10-01 2017-10-01T00:00 Beijing 768 0 true 768 768 768 768.768 768.768 char768 768 -769 2017-10-01 2017-10-01T00:00 Beijing 769 1 true 769 769 769 769.769 769.769 char769 769 -77 2017-10-01 2017-10-01T00:00 Beijing 77 77 true 77 77 77 77.77 77.77 char77 77 -770 2017-10-01 2017-10-01T00:00 Beijing 770 2 true 770 770 770 770.77 770.77 char770 770 -771 2017-10-01 2017-10-01T00:00 Beijing 771 3 true 771 771 771 771.771 771.771 char771 771 -772 2017-10-01 2017-10-01T00:00 Beijing 772 4 true 772 772 772 772.772 772.772 char772 772 -773 2017-10-01 2017-10-01T00:00 Beijing 773 5 true 773 773 773 773.773 773.773 char773 773 -774 2017-10-01 2017-10-01T00:00 Beijing 774 6 true 774 774 774 774.774 774.774 char774 774 -775 2017-10-01 2017-10-01T00:00 Beijing 775 7 true 775 775 775 775.775 775.775 char775 775 -776 2017-10-01 2017-10-01T00:00 Beijing 776 8 true 776 776 776 776.776 776.776 char776 776 -777 2017-10-01 2017-10-01T00:00 Beijing 777 9 true 777 777 777 777.777 777.777 char777 777 -778 2017-10-01 2017-10-01T00:00 Beijing 778 10 true 778 778 778 778.778 778.778 char778 778 -779 2017-10-01 2017-10-01T00:00 Beijing 779 11 true 779 779 779 779.779 779.779 char779 779 -78 2017-10-01 2017-10-01T00:00 Beijing 78 78 true 78 78 78 78.78 78.78 char78 78 -780 2017-10-01 2017-10-01T00:00 Beijing 780 12 true 780 780 780 780.78 780.78 char780 780 -781 2017-10-01 2017-10-01T00:00 Beijing 781 13 true 781 781 781 781.781 781.781 char781 781 -782 2017-10-01 2017-10-01T00:00 Beijing 782 14 true 782 782 782 782.782 782.782 char782 782 -783 2017-10-01 2017-10-01T00:00 Beijing 783 15 true 783 783 783 783.783 783.783 char783 783 -784 2017-10-01 2017-10-01T00:00 Beijing 784 16 true 784 784 784 784.784 784.784 char784 784 -785 2017-10-01 2017-10-01T00:00 Beijing 785 17 true 785 785 785 785.785 785.785 char785 785 -786 2017-10-01 2017-10-01T00:00 Beijing 786 18 true 786 786 786 786.786 786.786 char786 786 -787 2017-10-01 2017-10-01T00:00 Beijing 787 19 true 787 787 787 787.787 787.787 char787 787 -788 2017-10-01 2017-10-01T00:00 Beijing 788 20 true 788 788 788 788.788 788.788 char788 788 -789 2017-10-01 2017-10-01T00:00 Beijing 789 21 true 789 789 789 789.789 789.789 char789 789 -79 2017-10-01 2017-10-01T00:00 Beijing 79 79 true 79 79 79 79.79 79.79 char79 79 -790 2017-10-01 2017-10-01T00:00 Beijing 790 22 true 790 790 790 790.79 790.79 char790 790 -791 2017-10-01 2017-10-01T00:00 Beijing 791 23 true 791 791 791 791.791 791.791 char791 791 -792 2017-10-01 2017-10-01T00:00 Beijing 792 24 true 792 792 792 792.792 792.792 char792 792 -793 2017-10-01 2017-10-01T00:00 Beijing 793 25 true 793 793 793 793.793 793.793 char793 793 -794 2017-10-01 2017-10-01T00:00 Beijing 794 26 true 794 794 794 794.794 794.794 char794 794 -795 2017-10-01 2017-10-01T00:00 Beijing 795 27 true 795 795 795 
795.795 795.795 char795 795 -796 2017-10-01 2017-10-01T00:00 Beijing 796 28 true 796 796 796 796.796 796.796 char796 796 -797 2017-10-01 2017-10-01T00:00 Beijing 797 29 true 797 797 797 797.797 797.797 char797 797 -798 2017-10-01 2017-10-01T00:00 Beijing 798 30 true 798 798 798 798.798 798.798 char798 798 -799 2017-10-01 2017-10-01T00:00 Beijing 799 31 true 799 799 799 799.799 799.799 char799 799 -8 2017-10-01 2017-10-01T00:00 Beijing 8 8 true 8 8 8 8.8 8.8 char8 8 -80 2017-10-01 2017-10-01T00:00 Beijing 80 80 true 80 80 80 80.8 80.8 char80 80 -800 2017-10-01 2017-10-01T00:00 Beijing 800 32 true 800 800 800 800.8 800.8 char800 800 -801 2017-10-01 2017-10-01T00:00 Beijing 801 33 true 801 801 801 801.801 801.801 char801 801 -802 2017-10-01 2017-10-01T00:00 Beijing 802 34 true 802 802 802 802.802 802.802 char802 802 -803 2017-10-01 2017-10-01T00:00 Beijing 803 35 true 803 803 803 803.803 803.803 char803 803 -804 2017-10-01 2017-10-01T00:00 Beijing 804 36 true 804 804 804 804.804 804.804 char804 804 -805 2017-10-01 2017-10-01T00:00 Beijing 805 37 true 805 805 805 805.805 805.805 char805 805 -806 2017-10-01 2017-10-01T00:00 Beijing 806 38 true 806 806 806 806.806 806.806 char806 806 -807 2017-10-01 2017-10-01T00:00 Beijing 807 39 true 807 807 807 807.807 807.807 char807 807 -808 2017-10-01 2017-10-01T00:00 Beijing 808 40 true 808 808 808 808.808 808.808 char808 808 -809 2017-10-01 2017-10-01T00:00 Beijing 809 41 true 809 809 809 809.809 809.809 char809 809 -81 2017-10-01 2017-10-01T00:00 Beijing 81 81 true 81 81 81 81.81 81.81 char81 81 -810 2017-10-01 2017-10-01T00:00 Beijing 810 42 true 810 810 810 810.81 810.81 char810 810 -811 2017-10-01 2017-10-01T00:00 Beijing 811 43 true 811 811 811 811.811 811.811 char811 811 -812 2017-10-01 2017-10-01T00:00 Beijing 812 44 true 812 812 812 812.812 812.812 char812 812 -813 2017-10-01 2017-10-01T00:00 Beijing 813 45 true 813 813 813 813.813 813.813 char813 813 -814 2017-10-01 2017-10-01T00:00 Beijing 814 46 true 814 814 814 814.814 814.814 char814 814 -815 2017-10-01 2017-10-01T00:00 Beijing 815 47 true 815 815 815 815.815 815.815 char815 815 -816 2017-10-01 2017-10-01T00:00 Beijing 816 48 true 816 816 816 816.816 816.816 char816 816 -817 2017-10-01 2017-10-01T00:00 Beijing 817 49 true 817 817 817 817.817 817.817 char817 817 -818 2017-10-01 2017-10-01T00:00 Beijing 818 50 true 818 818 818 818.818 818.818 char818 818 -819 2017-10-01 2017-10-01T00:00 Beijing 819 51 true 819 819 819 819.819 819.819 char819 819 -82 2017-10-01 2017-10-01T00:00 Beijing 82 82 true 82 82 82 82.82 82.82 char82 82 -820 2017-10-01 2017-10-01T00:00 Beijing 820 52 true 820 820 820 820.82 820.82 char820 820 -821 2017-10-01 2017-10-01T00:00 Beijing 821 53 true 821 821 821 821.821 821.821 char821 821 -822 2017-10-01 2017-10-01T00:00 Beijing 822 54 true 822 822 822 822.822 822.822 char822 822 -823 2017-10-01 2017-10-01T00:00 Beijing 823 55 true 823 823 823 823.823 823.823 char823 823 -824 2017-10-01 2017-10-01T00:00 Beijing 824 56 true 824 824 824 824.824 824.824 char824 824 -825 2017-10-01 2017-10-01T00:00 Beijing 825 57 true 825 825 825 825.825 825.825 char825 825 -826 2017-10-01 2017-10-01T00:00 Beijing 826 58 true 826 826 826 826.826 826.826 char826 826 -827 2017-10-01 2017-10-01T00:00 Beijing 827 59 true 827 827 827 827.827 827.827 char827 827 -828 2017-10-01 2017-10-01T00:00 Beijing 828 60 true 828 828 828 828.828 828.828 char828 828 -829 2017-10-01 2017-10-01T00:00 Beijing 829 61 true 829 829 829 829.829 829.829 char829 829 -83 2017-10-01 2017-10-01T00:00 Beijing 83 83 true 83 83 
83 83.83 83.83 char83 83 -830 2017-10-01 2017-10-01T00:00 Beijing 830 62 true 830 830 830 830.83 830.83 char830 830 -831 2017-10-01 2017-10-01T00:00 Beijing 831 63 true 831 831 831 831.831 831.831 char831 831 -832 2017-10-01 2017-10-01T00:00 Beijing 832 64 true 832 832 832 832.832 832.832 char832 832 -833 2017-10-01 2017-10-01T00:00 Beijing 833 65 true 833 833 833 833.833 833.833 char833 833 -834 2017-10-01 2017-10-01T00:00 Beijing 834 66 true 834 834 834 834.834 834.834 char834 834 -835 2017-10-01 2017-10-01T00:00 Beijing 835 67 true 835 835 835 835.835 835.835 char835 835 -836 2017-10-01 2017-10-01T00:00 Beijing 836 68 true 836 836 836 836.836 836.836 char836 836 -837 2017-10-01 2017-10-01T00:00 Beijing 837 69 true 837 837 837 837.837 837.837 char837 837 -838 2017-10-01 2017-10-01T00:00 Beijing 838 70 true 838 838 838 838.838 838.838 char838 838 -839 2017-10-01 2017-10-01T00:00 Beijing 839 71 true 839 839 839 839.839 839.839 char839 839 -84 2017-10-01 2017-10-01T00:00 Beijing 84 84 true 84 84 84 84.84 84.84 char84 84 -840 2017-10-01 2017-10-01T00:00 Beijing 840 72 true 840 840 840 840.84 840.84 char840 840 -841 2017-10-01 2017-10-01T00:00 Beijing 841 73 true 841 841 841 841.841 841.841 char841 841 -842 2017-10-01 2017-10-01T00:00 Beijing 842 74 true 842 842 842 842.842 842.842 char842 842 -843 2017-10-01 2017-10-01T00:00 Beijing 843 75 true 843 843 843 843.843 843.843 char843 843 -844 2017-10-01 2017-10-01T00:00 Beijing 844 76 true 844 844 844 844.844 844.844 char844 844 -845 2017-10-01 2017-10-01T00:00 Beijing 845 77 true 845 845 845 845.845 845.845 char845 845 -846 2017-10-01 2017-10-01T00:00 Beijing 846 78 true 846 846 846 846.846 846.846 char846 846 -847 2017-10-01 2017-10-01T00:00 Beijing 847 79 true 847 847 847 847.847 847.847 char847 847 -848 2017-10-01 2017-10-01T00:00 Beijing 848 80 true 848 848 848 848.848 848.848 char848 848 -849 2017-10-01 2017-10-01T00:00 Beijing 849 81 true 849 849 849 849.849 849.849 char849 849 -85 2017-10-01 2017-10-01T00:00 Beijing 85 85 true 85 85 85 85.85 85.85 char85 85 -850 2017-10-01 2017-10-01T00:00 Beijing 850 82 true 850 850 850 850.85 850.85 char850 850 -851 2017-10-01 2017-10-01T00:00 Beijing 851 83 true 851 851 851 851.851 851.851 char851 851 -852 2017-10-01 2017-10-01T00:00 Beijing 852 84 true 852 852 852 852.852 852.852 char852 852 -853 2017-10-01 2017-10-01T00:00 Beijing 853 85 true 853 853 853 853.853 853.853 char853 853 -854 2017-10-01 2017-10-01T00:00 Beijing 854 86 true 854 854 854 854.854 854.854 char854 854 -855 2017-10-01 2017-10-01T00:00 Beijing 855 87 true 855 855 855 855.855 855.855 char855 855 -856 2017-10-01 2017-10-01T00:00 Beijing 856 88 true 856 856 856 856.856 856.856 char856 856 -857 2017-10-01 2017-10-01T00:00 Beijing 857 89 true 857 857 857 857.857 857.857 char857 857 -858 2017-10-01 2017-10-01T00:00 Beijing 858 90 true 858 858 858 858.858 858.858 char858 858 -859 2017-10-01 2017-10-01T00:00 Beijing 859 91 true 859 859 859 859.859 859.859 char859 859 -86 2017-10-01 2017-10-01T00:00 Beijing 86 86 true 86 86 86 86.86 86.86 char86 86 -860 2017-10-01 2017-10-01T00:00 Beijing 860 92 true 860 860 860 860.86 860.86 char860 860 -861 2017-10-01 2017-10-01T00:00 Beijing 861 93 true 861 861 861 861.861 861.861 char861 861 -862 2017-10-01 2017-10-01T00:00 Beijing 862 94 true 862 862 862 862.862 862.862 char862 862 -863 2017-10-01 2017-10-01T00:00 Beijing 863 95 true 863 863 863 863.863 863.863 char863 863 -864 2017-10-01 2017-10-01T00:00 Beijing 864 96 true 864 864 864 864.864 864.864 char864 864 -865 2017-10-01 2017-10-01T00:00 
Beijing 865 97 true 865 865 865 865.865 865.865 char865 865 -866 2017-10-01 2017-10-01T00:00 Beijing 866 98 true 866 866 866 866.866 866.866 char866 866 -867 2017-10-01 2017-10-01T00:00 Beijing 867 99 true 867 867 867 867.867 867.867 char867 867 -868 2017-10-01 2017-10-01T00:00 Beijing 868 100 true 868 868 868 868.868 868.868 char868 868 -869 2017-10-01 2017-10-01T00:00 Beijing 869 101 true 869 869 869 869.869 869.869 char869 869 -87 2017-10-01 2017-10-01T00:00 Beijing 87 87 true 87 87 87 87.87 87.87 char87 87 -870 2017-10-01 2017-10-01T00:00 Beijing 870 102 true 870 870 870 870.87 870.87 char870 870 -871 2017-10-01 2017-10-01T00:00 Beijing 871 103 true 871 871 871 871.871 871.871 char871 871 -872 2017-10-01 2017-10-01T00:00 Beijing 872 104 true 872 872 872 872.872 872.872 char872 872 -873 2017-10-01 2017-10-01T00:00 Beijing 873 105 true 873 873 873 873.873 873.873 char873 873 -874 2017-10-01 2017-10-01T00:00 Beijing 874 106 true 874 874 874 874.874 874.874 char874 874 -875 2017-10-01 2017-10-01T00:00 Beijing 875 107 true 875 875 875 875.875 875.875 char875 875 -876 2017-10-01 2017-10-01T00:00 Beijing 876 108 true 876 876 876 876.876 876.876 char876 876 -877 2017-10-01 2017-10-01T00:00 Beijing 877 109 true 877 877 877 877.877 877.877 char877 877 -878 2017-10-01 2017-10-01T00:00 Beijing 878 110 true 878 878 878 878.878 878.878 char878 878 -879 2017-10-01 2017-10-01T00:00 Beijing 879 111 true 879 879 879 879.879 879.879 char879 879 -88 2017-10-01 2017-10-01T00:00 Beijing 88 88 true 88 88 88 88.88 88.88 char88 88 -880 2017-10-01 2017-10-01T00:00 Beijing 880 112 true 880 880 880 880.88 880.88 char880 880 -881 2017-10-01 2017-10-01T00:00 Beijing 881 113 true 881 881 881 881.881 881.881 char881 881 -882 2017-10-01 2017-10-01T00:00 Beijing 882 114 true 882 882 882 882.882 882.882 char882 882 -883 2017-10-01 2017-10-01T00:00 Beijing 883 115 true 883 883 883 883.883 883.883 char883 883 -884 2017-10-01 2017-10-01T00:00 Beijing 884 116 true 884 884 884 884.884 884.884 char884 884 -885 2017-10-01 2017-10-01T00:00 Beijing 885 117 true 885 885 885 885.885 885.885 char885 885 -886 2017-10-01 2017-10-01T00:00 Beijing 886 118 true 886 886 886 886.886 886.886 char886 886 -887 2017-10-01 2017-10-01T00:00 Beijing 887 119 true 887 887 887 887.887 887.887 char887 887 -888 2017-10-01 2017-10-01T00:00 Beijing 888 120 true 888 888 888 888.888 888.888 char888 888 -889 2017-10-01 2017-10-01T00:00 Beijing 889 121 true 889 889 889 889.889 889.889 char889 889 -89 2017-10-01 2017-10-01T00:00 Beijing 89 89 true 89 89 89 89.89 89.89 char89 89 -890 2017-10-01 2017-10-01T00:00 Beijing 890 122 true 890 890 890 890.89 890.89 char890 890 -891 2017-10-01 2017-10-01T00:00 Beijing 891 123 true 891 891 891 891.891 891.891 char891 891 -892 2017-10-01 2017-10-01T00:00 Beijing 892 124 true 892 892 892 892.892 892.892 char892 892 -893 2017-10-01 2017-10-01T00:00 Beijing 893 125 true 893 893 893 893.893 893.893 char893 893 -894 2017-10-01 2017-10-01T00:00 Beijing 894 126 true 894 894 894 894.894 894.894 char894 894 -895 2017-10-01 2017-10-01T00:00 Beijing 895 127 true 895 895 895 895.895 895.895 char895 895 -896 2017-10-01 2017-10-01T00:00 Beijing 896 0 true 896 896 896 896.896 896.896 char896 896 -897 2017-10-01 2017-10-01T00:00 Beijing 897 1 true 897 897 897 897.897 897.897 char897 897 -898 2017-10-01 2017-10-01T00:00 Beijing 898 2 true 898 898 898 898.898 898.898 char898 898 -899 2017-10-01 2017-10-01T00:00 Beijing 899 3 true 899 899 899 899.899 899.899 char899 899 -9 2017-10-01 2017-10-01T00:00 Beijing 9 9 true 9 9 9 9.9 9.9 char9 
9 -90 2017-10-01 2017-10-01T00:00 Beijing 90 90 true 90 90 90 90.9 90.9 char90 90 -900 2017-10-01 2017-10-01T00:00 Beijing 900 4 true 900 900 900 900.9 900.9 char900 900 -901 2017-10-01 2017-10-01T00:00 Beijing 901 5 true 901 901 901 901.901 901.901 char901 901 -902 2017-10-01 2017-10-01T00:00 Beijing 902 6 true 902 902 902 902.902 902.902 char902 902 -903 2017-10-01 2017-10-01T00:00 Beijing 903 7 true 903 903 903 903.903 903.903 char903 903 -904 2017-10-01 2017-10-01T00:00 Beijing 904 8 true 904 904 904 904.904 904.904 char904 904 -905 2017-10-01 2017-10-01T00:00 Beijing 905 9 true 905 905 905 905.905 905.905 char905 905 -906 2017-10-01 2017-10-01T00:00 Beijing 906 10 true 906 906 906 906.906 906.906 char906 906 -907 2017-10-01 2017-10-01T00:00 Beijing 907 11 true 907 907 907 907.907 907.907 char907 907 -908 2017-10-01 2017-10-01T00:00 Beijing 908 12 true 908 908 908 908.908 908.908 char908 908 -909 2017-10-01 2017-10-01T00:00 Beijing 909 13 true 909 909 909 909.909 909.909 char909 909 -91 2017-10-01 2017-10-01T00:00 Beijing 91 91 true 91 91 91 91.91 91.91 char91 91 -910 2017-10-01 2017-10-01T00:00 Beijing 910 14 true 910 910 910 910.91 910.91 char910 910 -911 2017-10-01 2017-10-01T00:00 Beijing 911 15 true 911 911 911 911.911 911.911 char911 911 -912 2017-10-01 2017-10-01T00:00 Beijing 912 16 true 912 912 912 912.912 912.912 char912 912 -913 2017-10-01 2017-10-01T00:00 Beijing 913 17 true 913 913 913 913.913 913.913 char913 913 -914 2017-10-01 2017-10-01T00:00 Beijing 914 18 true 914 914 914 914.914 914.914 char914 914 -915 2017-10-01 2017-10-01T00:00 Beijing 915 19 true 915 915 915 915.915 915.915 char915 915 -916 2017-10-01 2017-10-01T00:00 Beijing 916 20 true 916 916 916 916.916 916.916 char916 916 -917 2017-10-01 2017-10-01T00:00 Beijing 917 21 true 917 917 917 917.917 917.917 char917 917 -918 2017-10-01 2017-10-01T00:00 Beijing 918 22 true 918 918 918 918.918 918.918 char918 918 -919 2017-10-01 2017-10-01T00:00 Beijing 919 23 true 919 919 919 919.919 919.919 char919 919 -92 2017-10-01 2017-10-01T00:00 Beijing 92 92 true 92 92 92 92.92 92.92 char92 92 -920 2017-10-01 2017-10-01T00:00 Beijing 920 24 true 920 920 920 920.92 920.92 char920 920 -921 2017-10-01 2017-10-01T00:00 Beijing 921 25 true 921 921 921 921.921 921.921 char921 921 -922 2017-10-01 2017-10-01T00:00 Beijing 922 26 true 922 922 922 922.922 922.922 char922 922 -923 2017-10-01 2017-10-01T00:00 Beijing 923 27 true 923 923 923 923.923 923.923 char923 923 -924 2017-10-01 2017-10-01T00:00 Beijing 924 28 true 924 924 924 924.924 924.924 char924 924 -925 2017-10-01 2017-10-01T00:00 Beijing 925 29 true 925 925 925 925.925 925.925 char925 925 -926 2017-10-01 2017-10-01T00:00 Beijing 926 30 true 926 926 926 926.926 926.926 char926 926 -927 2017-10-01 2017-10-01T00:00 Beijing 927 31 true 927 927 927 927.927 927.927 char927 927 -928 2017-10-01 2017-10-01T00:00 Beijing 928 32 true 928 928 928 928.928 928.928 char928 928 -929 2017-10-01 2017-10-01T00:00 Beijing 929 33 true 929 929 929 929.929 929.929 char929 929 -93 2017-10-01 2017-10-01T00:00 Beijing 93 93 true 93 93 93 93.93 93.93 char93 93 -930 2017-10-01 2017-10-01T00:00 Beijing 930 34 true 930 930 930 930.93 930.93 char930 930 -931 2017-10-01 2017-10-01T00:00 Beijing 931 35 true 931 931 931 931.931 931.931 char931 931 -932 2017-10-01 2017-10-01T00:00 Beijing 932 36 true 932 932 932 932.932 932.932 char932 932 -933 2017-10-01 2017-10-01T00:00 Beijing 933 37 true 933 933 933 933.933 933.933 char933 933 -934 2017-10-01 2017-10-01T00:00 Beijing 934 38 true 934 934 934 934.934 934.934 
char934 934 -935 2017-10-01 2017-10-01T00:00 Beijing 935 39 true 935 935 935 935.935 935.935 char935 935 -936 2017-10-01 2017-10-01T00:00 Beijing 936 40 true 936 936 936 936.936 936.936 char936 936 -937 2017-10-01 2017-10-01T00:00 Beijing 937 41 true 937 937 937 937.937 937.937 char937 937 -938 2017-10-01 2017-10-01T00:00 Beijing 938 42 true 938 938 938 938.938 938.938 char938 938 -939 2017-10-01 2017-10-01T00:00 Beijing 939 43 true 939 939 939 939.939 939.939 char939 939 -94 2017-10-01 2017-10-01T00:00 Beijing 94 94 true 94 94 94 94.94 94.94 char94 94 -940 2017-10-01 2017-10-01T00:00 Beijing 940 44 true 940 940 940 940.94 940.94 char940 940 -941 2017-10-01 2017-10-01T00:00 Beijing 941 45 true 941 941 941 941.941 941.941 char941 941 -942 2017-10-01 2017-10-01T00:00 Beijing 942 46 true 942 942 942 942.942 942.942 char942 942 -943 2017-10-01 2017-10-01T00:00 Beijing 943 47 true 943 943 943 943.943 943.943 char943 943 -944 2017-10-01 2017-10-01T00:00 Beijing 944 48 true 944 944 944 944.944 944.944 char944 944 -945 2017-10-01 2017-10-01T00:00 Beijing 945 49 true 945 945 945 945.945 945.945 char945 945 -946 2017-10-01 2017-10-01T00:00 Beijing 946 50 true 946 946 946 946.946 946.946 char946 946 -947 2017-10-01 2017-10-01T00:00 Beijing 947 51 true 947 947 947 947.947 947.947 char947 947 -948 2017-10-01 2017-10-01T00:00 Beijing 948 52 true 948 948 948 948.948 948.948 char948 948 -949 2017-10-01 2017-10-01T00:00 Beijing 949 53 true 949 949 949 949.949 949.949 char949 949 -95 2017-10-01 2017-10-01T00:00 Beijing 95 95 true 95 95 95 95.95 95.95 char95 95 -950 2017-10-01 2017-10-01T00:00 Beijing 950 54 true 950 950 950 950.95 950.95 char950 950 -951 2017-10-01 2017-10-01T00:00 Beijing 951 55 true 951 951 951 951.951 951.951 char951 951 -952 2017-10-01 2017-10-01T00:00 Beijing 952 56 true 952 952 952 952.952 952.952 char952 952 -953 2017-10-01 2017-10-01T00:00 Beijing 953 57 true 953 953 953 953.953 953.953 char953 953 -954 2017-10-01 2017-10-01T00:00 Beijing 954 58 true 954 954 954 954.954 954.954 char954 954 -955 2017-10-01 2017-10-01T00:00 Beijing 955 59 true 955 955 955 955.955 955.955 char955 955 -956 2017-10-01 2017-10-01T00:00 Beijing 956 60 true 956 956 956 956.956 956.956 char956 956 -957 2017-10-01 2017-10-01T00:00 Beijing 957 61 true 957 957 957 957.957 957.957 char957 957 -958 2017-10-01 2017-10-01T00:00 Beijing 958 62 true 958 958 958 958.958 958.958 char958 958 -959 2017-10-01 2017-10-01T00:00 Beijing 959 63 true 959 959 959 959.959 959.959 char959 959 -96 2017-10-01 2017-10-01T00:00 Beijing 96 96 true 96 96 96 96.96 96.96 char96 96 -960 2017-10-01 2017-10-01T00:00 Beijing 960 64 true 960 960 960 960.96 960.96 char960 960 -961 2017-10-01 2017-10-01T00:00 Beijing 961 65 true 961 961 961 961.961 961.961 char961 961 -962 2017-10-01 2017-10-01T00:00 Beijing 962 66 true 962 962 962 962.962 962.962 char962 962 -963 2017-10-01 2017-10-01T00:00 Beijing 963 67 true 963 963 963 963.963 963.963 char963 963 -964 2017-10-01 2017-10-01T00:00 Beijing 964 68 true 964 964 964 964.964 964.964 char964 964 -965 2017-10-01 2017-10-01T00:00 Beijing 965 69 true 965 965 965 965.965 965.965 char965 965 -966 2017-10-01 2017-10-01T00:00 Beijing 966 70 true 966 966 966 966.966 966.966 char966 966 -967 2017-10-01 2017-10-01T00:00 Beijing 967 71 true 967 967 967 967.967 967.967 char967 967 -968 2017-10-01 2017-10-01T00:00 Beijing 968 72 true 968 968 968 968.968 968.968 char968 968 -969 2017-10-01 2017-10-01T00:00 Beijing 969 73 true 969 969 969 969.969 969.969 char969 969 -97 2017-10-01 2017-10-01T00:00 Beijing 97 97 
true 97 97 97 97.97 97.97 char97 97 -970 2017-10-01 2017-10-01T00:00 Beijing 970 74 true 970 970 970 970.97 970.97 char970 970 -971 2017-10-01 2017-10-01T00:00 Beijing 971 75 true 971 971 971 971.971 971.971 char971 971 -972 2017-10-01 2017-10-01T00:00 Beijing 972 76 true 972 972 972 972.972 972.972 char972 972 -973 2017-10-01 2017-10-01T00:00 Beijing 973 77 true 973 973 973 973.973 973.973 char973 973 -974 2017-10-01 2017-10-01T00:00 Beijing 974 78 true 974 974 974 974.974 974.974 char974 974 -975 2017-10-01 2017-10-01T00:00 Beijing 975 79 true 975 975 975 975.975 975.975 char975 975 -976 2017-10-01 2017-10-01T00:00 Beijing 976 80 true 976 976 976 976.976 976.976 char976 976 -977 2017-10-01 2017-10-01T00:00 Beijing 977 81 true 977 977 977 977.977 977.977 char977 977 -978 2017-10-01 2017-10-01T00:00 Beijing 978 82 true 978 978 978 978.978 978.978 char978 978 -979 2017-10-01 2017-10-01T00:00 Beijing 979 83 true 979 979 979 979.979 979.979 char979 979 -98 2017-10-01 2017-10-01T00:00 Beijing 98 98 true 98 98 98 98.98 98.98 char98 98 -980 2017-10-01 2017-10-01T00:00 Beijing 980 84 true 980 980 980 980.98 980.98 char980 980 -981 2017-10-01 2017-10-01T00:00 Beijing 981 85 true 981 981 981 981.981 981.981 char981 981 -982 2017-10-01 2017-10-01T00:00 Beijing 982 86 true 982 982 982 982.982 982.982 char982 982 -983 2017-10-01 2017-10-01T00:00 Beijing 983 87 true 983 983 983 983.983 983.983 char983 983 -984 2017-10-01 2017-10-01T00:00 Beijing 984 88 true 984 984 984 984.984 984.984 char984 984 -985 2017-10-01 2017-10-01T00:00 Beijing 985 89 true 985 985 985 985.985 985.985 char985 985 -986 2017-10-01 2017-10-01T00:00 Beijing 986 90 true 986 986 986 986.986 986.986 char986 986 -987 2017-10-01 2017-10-01T00:00 Beijing 987 91 true 987 987 987 987.987 987.987 char987 987 -988 2017-10-01 2017-10-01T00:00 Beijing 988 92 true 988 988 988 988.988 988.988 char988 988 -989 2017-10-01 2017-10-01T00:00 Beijing 989 93 true 989 989 989 989.989 989.989 char989 989 -99 2017-10-01 2017-10-01T00:00 Beijing 99 99 true 99 99 99 99.99 99.99 char99 99 -990 2017-10-01 2017-10-01T00:00 Beijing 990 94 true 990 990 990 990.99 990.99 char990 990 -991 2017-10-01 2017-10-01T00:00 Beijing 991 95 true 991 991 991 991.991 991.991 char991 991 -992 2017-10-01 2017-10-01T00:00 Beijing 992 96 true 992 992 992 992.992 992.992 char992 992 -993 2017-10-01 2017-10-01T00:00 Beijing 993 97 true 993 993 993 993.993 993.993 char993 993 -994 2017-10-01 2017-10-01T00:00 Beijing 994 98 true 994 994 994 994.994 994.994 char994 994 -995 2017-10-01 2017-10-01T00:00 Beijing 995 99 true 995 995 995 995.995 995.995 char995 995 -996 2017-10-01 2017-10-01T00:00 Beijing 996 100 true 996 996 996 996.996 996.996 char996 996 -997 2017-10-01 2017-10-01T00:00 Beijing 997 101 true 997 997 997 997.997 997.997 char997 997 -998 2017-10-01 2017-10-01T00:00 Beijing 998 102 true 998 998 998 998.998 998.998 char998 998 -999 2017-10-01 2017-10-01T00:00 Beijing 999 103 true 999 999 999 999.999 999.999 char999 999 +-8931695061434156518 2023-09-26 2024-07-30T04:45:09 sEbUDXm6TiExnxayd -718743868 -1020225994 true -217881155 -898739274693207285 5327644655779443260 -15151.082 -8.89773650358034E8 oiHazUcaWQ \N +-8932178348897915948 2024-08-05 2024-02-12T07:53:12 -1465612440 -3249990 false -820405423 7663987118360857249 2209335448013719960 -3278.7039 1.439820324419633E9 WUy2AgWG46 -248775456844895111.835781936 +-8932444864146308918 2023-11-09 2024-03-12T00:23:01 HAg9JIdnpW0rRbDYahF \N -1808569134 true 67960571 952481159663271269 5152538827551074994 10742.792 
-1.948654324681867E9 NtK3mtrzp5 -942151122692414959.155648678 +-8932700647988500165 2024-05-21 2023-10-08T19:32:27 v -133455786 -1665762361 false 1981930223 -8660598485686960719 6615545158287690906 -4915.241 2.138459695408872E9 qCTonJAuMp 284516059431518653.500070310 +-8932748201093227315 2024-05-11 2023-09-16T00:18:26 2TOIL2M009st5mf9TRQH -1877008356 921305362 false -1981616303 1236009165395388977 4951568993329666383 19377.445 -1.129372722812793E9 0UqQZclRaW 593612019225871743.642610907 +-8932854226583615625 2024-06-16 2024-04-16T02:38:35 n3IrFFYXdg 1937961415 1140831817 true -5282339 209293407048440791 -6623885582517676656 10019.616 1.026697848039003E9 ZP2SOVZd3p \N +-8934610682875691675 2024-05-08 2024-08-30T02:56:30 P6rlBfZ9AUCRu7KnhKFz 1255212559 933064629 \N 575244866 6035866710373691884 -2341082397666838816 19251.998 -1.69903899906861E8 TlLwezas7N -536160800468555771.746195900 +-8934900117543177850 2024-02-10 2024-02-10T15:05 \N -1032323488 748375442 true 811991293 -4936464562648183638 6237150716015605315 -4243.6113 1.477436358853433E9 K1Oan7GN8c 631637423063873061.292243546 +-8934987280773468768 2024-03-19 2023-11-16T11:36:21 rMuxc5Sek 2039296505 -2079486568 false 1303854553 -3661194869993663245 -2549669635774667020 15324.969 -1.82263870466564E9 Lc8QfuqITB 295730984743778602.238822183 +-8935120073660008753 2024-01-13 2023-11-26T03:34:12 82F -498012824 \N false -301982097 -64293229032266302 -9115745771549714597 31642.006 -7.72242789931331E8 imdnV1msvw 752670741269287212.142321974 +-8935134632453027334 2024-06-26 2024-03-13T13:49:36 xJPi 1839438517 1039420147 true 298051928 6920982538525365578 164406100642855992 18017.057 1.822239954559808E9 EVsnhYkc63 -352848988938761193.978073195 +-8935964243339007923 2024-04-03 2024-04-15T12:49:58 mh7Pq6qEE 50201083 1627672451 true -1887394309 9047858001655478678 5416047434210200241 2871.9578 1.685091173942362E9 iWfzeYHG4m 864723915956658670.698941105 +-8936912923839106284 2023-11-16 2023-10-05T12:16:53 gKtSPc2DEye9mpkI -1809588467 1577075526 false 708917816 -5449624779437925442 7570208151208316664 31213.158 -9.29544544092673E8 Rar2Wqr584 826284977451264477.372559089 +-8937420097837727682 2023-12-26 2024-05-03T08:16:49 IkDJnnJ6u -310799468 1340958160 true -1710829299 2177952597455873554 2580368023322879397 18519.09 8.07277222209446E8 eEl3WnwaOk 881286985418063373.440247720 +-8937673767114084651 2024-08-26 2023-10-04T13:47:17 G5KFqOtBF5PA -1805583917 1375379641 false -1971116326 2095138201740501249 -4640246009131126610 -12746.678 -1.828042606123223E9 \N -748798801813059902.149967378 +-8938023405872773334 2024-02-02 2023-09-04T22:50:31 dqZ53pfVfW 1565484139 -1493376042 false -852295211 -2026805279476508375 7895457365674465638 29355.57 7.43357842199929E8 L5rTy5C8ic 706639156309050970.830764821 +-8938268806048183571 2024-07-06 2024-08-06T18:49:48 Z80RZvtK17BTr5Z7cyEe 2121272844 431073772 true 503141976 76688655736289327 4325626328337784956 -26969.863 -9.72256542069979E8 gentwmrly7 160130640088003839.497448292 +-8938684886664048695 2023-09-25 2024-06-22T22:13:57 W 234656185 -811592656 true -1336086915 3962341542706404768 -8983204598628439065 2259.4385 -2.24002988682051E8 VuoLQgUsfv -741033077834012985.175718156 +-8938849916336777886 2024-05-12 2024-05-22T00:56:29 GwWk0IV 1298394666 -81578604 false -696194342 -7153764595050722977 5478585014415622921 25246.203 -1.183462156387111E9 VfuZs3H9kY 847098580723458533.379506017 +-8939295780825140792 2024-04-02 2023-12-26T19:31:10 221995899 469684878 false 1990291210 1507030373386747962 991403429606157739 
25351.418 -7.43400533579426E8 7X2JWPYl93 967482168964299955.990882890 +-8939601208470041071 2024-04-23 2024-05-26T20:17:08 U7BghkFV 1143482039 112870979 true 898177166 5646009457300435915 5563535897547191788 -1699.4708 -4.29737153243021E8 kBWd27fqhR 201137674969266084.559277164 +-8939764776762228482 2023-10-17 2024-07-19T18:57:18 qpkXGtBppmavwq -850421965 260221278 false 633489083 7713064983305240885 196167945836718415 \N 1.375214644040756E9 zcYOZvHkmZ 443325302528989004.423158017 +-8939851909449360954 2023-10-23 2024-08-27T02:44:50 kKVPGXm2Xh7 798431478 1838301460 true -1518718757 -765020894667040967 -7150129223560246676 28470.84 -8.17805560607757E8 p6QBLIunuT -322364889609255991.328645467 +-8940438390614766350 2023-10-09 2024-02-22T22:34:48 sLG 1964193060 374159998 false 974772325 6475233342886866335 7353994465490962910 12143.463 1.575020507776713E9 uAE5GDIg7D -160839654252291017.555269259 +-8940678706697114477 2024-08-28 2024-03-19T12:04:37 xr6PCpIjchOe7 1918453728 -745886346 true 1855388379 1995815468283751656 -2471776217838015741 -9090.592 -2.114225344088645E9 ERAT0n3DgQ 900619855493263438.504146899 +-8941490606142173749 2023-12-17 2023-12-05T13:49:22 kbZbc -532012643 \N false -1988320210 -5116861187374967029 3052541654561418696 -1598.9381 8.5600244755013E8 opVdDV5BYd 10490078461821887.189982720 +-8941935833113814367 2023-12-27 2023-09-10T15:08:48 5u4zu6sO2s3X -585369897 1025816849 true 395495813 2535079711705384469 3395373014276128942 3360.1978 1.66168514942364E8 tjX1DAW3AU -791710563414877647.159844483 +-8942403499083249823 2024-07-15 2024-06-07T03:11:35 dvYmZrVhhNDIG 312954881 1108202466 false 1656043982 2090266607404777353 4892189307715346365 22841.014 \N tT3KIro4lr 8515544311771249.569994131 +-8942722984646561920 2024-03-22 2024-02-25T08:03:15 IRS2 -255224958 -1863528932 false 1617674887 4255785983050208278 -158088941853535926 9554.143 5.34209677864029E8 CWSr5WZ1cB -618208009183387494.596914041 +-8942792148362418516 2024-04-27 2023-12-07T09:06:07 Rv4 870251169 -85457091 true -379783592 -4535616406763540411 3374382721506721388 -5927.9175 7.49308602866272E8 0k1K9zmcQv 827044112119604072.919169800 +-8942808233707867922 2024-02-02 2024-06-19T11:47:24 FxIF3HjaFCwoPVLZ -387453661 -1049561082 true 1355772668 6330632402617617003 -6332197220493191426 28091.215 1.752148133989041E9 oEUMtHlHQi -789404161229899291.224552965 +-8942828097621097788 2024-08-19 2024-02-21T05:40:30 89IrNRtwBD -950662822 -1846802853 true 1639714253 3529975023273953717 -3344698246848736031 24449.098 9.42226884963569E8 r0aRAW94mH -513345586320579570.186000196 +-8942978611746853759 2023-09-17 2024-02-21T07:59:26 2tmjNCZTEsxDUROWQ -601585736 -124622686 false -1723381924 -5129533577176335485 4951899506354938509 -19526.76 1.918894791679604E9 VRjsQGy2Nu \N +-8943067788938114583 2024-05-18 2024-07-05T16:06:37 ukUq8quFfg 1271331283 -1810686885 false -1882913754 5346268308250967696 6593033658249097126 -16502.459 -1.4677542812149E7 XnIpijerUC -863494726779540485.855524591 +-8943381116334474705 2024-03-17 2023-12-18T06:30:17 UoIvdzlMt9s4 \N \N false -1825966241 8596548727482147336 \N -8195.287 5.21509533885784E8 DpaQyQTRsG -139308673945460818.628274560 +-8943393654714911405 2024-08-26 2023-12-04T02:47:35 d3sxACTvIG3RhF 565281832 1924587603 false 721320612 6352788669101384351 8404289735627776012 -10455.67 -2.050928204114686E9 fwxQUwVK8Y -768121667477349503.691254774 +-8943410412599520214 2024-02-10 2024-01-12T18:29:11 \N 1282568115 \N true \N -2327772868968001321 -6363651559215072811 14645.886 -2.024668175766024E9 edQTTcpryD 
-133263664591975847.811862201 +-8943443117122796531 2023-10-17 2024-02-04T02:22:46 u 124288281 706376252 true 444233147 -5188477394101602050 218932793031944515 -4092.5427 1.759573045478306E9 S8A5UJ7Bp1 388525901239840888.999773134 +-8943541352407472389 2024-04-30 2024-05-01T01:38:28 \N 708642650 -1128222733 \N -1323006591 3494546751094522062 -4343517836206590973 13318.771 -1.942626285392801E9 BdkYOgp6mY -944949856548509044.137389719 +-8944019207770793123 2023-10-01 2023-11-19T19:31:16 WTZmcne9pUfJ6US9INC -151241826 -1899196848 true 1349130292 -7583521896323640595 2278067334645364213 9792.421 -1.10212040483195E9 z595ILsmpb -10262515905940261.513798685 +-8944021097091988643 2024-04-21 2024-08-21T18:59:17 PFmywDBk4ip1 15642688 1193454372 true 1846103993 1961225855420676449 -1401293863400085200 -4450.154 -9.22477686707991E8 RcG6OgaOg5 88961697323262794.505813520 +-8944384823572184915 2023-11-12 2023-12-14T04:18:33 l4Xd 549836335 -1135183301 false -793705773 -8900521401354559172 1553557972967275917 7674.556 3.056576384474E7 8DbMzvPBQt -586001410152514918.258321435 +-8944518358497155935 2024-06-19 2024-06-30T07:23:06 gnuSu -2056108273 \N true 1190715502 \N -7193836453596874061 29469.812 1.539926753804128E9 twqryyDl6g -873089235505790623.766207330 +-8944774182173422160 2024-04-28 2024-06-12T07:48:13 enVivYSjdjF7K8veR5s -1807074816 519000134 false 1948203846 7847040956615243063 8729326872044294254 -25924.273 -4.87630878041586E8 y9aEl5e6Wv -85745403744857745.584471092 +-8944880143817160808 2024-06-27 2024-04-22T10:18:38 1sZB6c0vZT -122265183 -666747646 true 1964280670 6208660909187948973 -6732340670811146898 22005.406 -1.756624500653008E9 9xIEvq73Io 881061453112760256.421243820 +-8945166419140125443 2024-02-03 2024-03-12T12:18:19 NwdKBPHjbTVhlMMp8I 1384009289 378855338 \N -1150681506 -7634615148212652955 4525614227344557899 -18795.074 6.27560220864939E8 k5RdFGu6Kv 105366579535070876.829497884 +-8946843167831446403 2023-09-30 2024-05-30T19:57:30 Zaqnk23tEdg7R -2014809213 -221431739 true 1624981441 \N \N -23928.746 -9.19640365610528E8 YjBGZNRTHL -920432219860498882.508013462 +-8947206039003822808 2024-07-06 2024-05-23T17:57:41 KChylnoQR -1072137371 -266289438 false 384698813 -3931388813627943169 4234237669307215884 3217.89 1.069286287205469E9 WisWyGiZ8T -232088679988132812.265933080 +-8947522864052070425 2023-09-22 2023-10-01T00:02:10 nngitmFl2C1lgK 868624142 143672756 false 2044374849 6863504284805875101 1073390508414661271 16134.355 -2.087058610476454E9 \N 69757147242184951.622419204 +-8947694049829194934 2023-12-18 2024-03-31T21:04:58 fz 667661303 -1314810900 false 2083172863 -4747594226303254936 -1204397295379782024 5467.5415 -9.3192884366038E8 TO1tpgc1fb 304407824663334193.787168082 +-8947928008835793884 2024-05-17 2023-10-29T12:43:25 0BC7EQSjzXHae7YnOOj \N 49786544 false -614078815 8126077317384315579 5067510863876873580 -15374.67 1.735309093582694E9 l53ES1DJoe -293204229434868305.141965151 +-8947942389278324952 2023-09-06 2024-07-19T23:42:28 PVtMfzMNJj2xSBlL -856360723 -1700485377 true 419279368 \N 3863541156512830635 31650.045 1.530984164310007E9 8PFX63LqSx 939004842206816056.400459579 +-8948028373567644527 2024-06-30 2024-01-17T03:59:45 EihrYst9 224290515 -458916937 true -132542117 -2986492834514591646 8623071358742719865 \N -1.31003620115536E9 dJ9rEM4q2N -755680282762297495.321236851 +-8948086196181310421 2024-04-24 2024-01-29T23:25:50 PjDiMgcKzNYJV -1472044719 905256031 true -1505187293 7033355939953607619 \N 31037.502 -1.305312296760797E9 rOBx9QIvMm -135384923899266976.980901865 
+-8948281186569768487 2023-10-25 2024-05-09T15:01:41 cP -703890045 -1191118886 true -374161810 -4330443450999351187 -7205030132080897130 23614.715 -1.736585461296501E9 \N 286446046661149249.413097319 +-8948733673872614515 2023-12-05 2024-04-11T01:24:04 8egJXTJlDFEKZmfQtr \N 1926801955 false -80072249 4230517635022854502 \N -26029.697 -1.64260510296601E9 o1fKOk0SR7 -542033333594793950.241296616 +-8948787425908365024 2023-11-26 2024-04-20T13:20:01 tBrGxpxtq9UWKji 988075093 -2132381755 true -1959721237 8458533746862595209 \N 12866.686 -1.291780247648068E9 mlPu5C9Epf 281648559561283409.780341087 +-8949500950115659311 2023-10-13 2024-06-21T22:34:34 XBx30j -993608277 -1822591510 false 850151580 -5143206295827938110 -6265943258213698499 -25674.219 -1.899028359659908E9 LsHkTjLaIJ 182927914621446484.866033160 +-8949569419548829704 2024-03-28 2024-01-04T18:05:02 \N \N -1267395899 false 1738156480 -8207827599681997690 1002091346222234355 24329.107 2.08303892610213E9 \N 645505746148717049.578685187 +-8949788421813631602 2024-08-19 2024-03-02T16:59:51 2121135498 1036464360 \N -1628173866 -5382624058048888281 -1571079601310183491 -28751.38 \N FBGgvBAH2X -290006777908129831.894399316 +-8949904189187450779 2024-08-22 2024-02-03T14:27:36 wofinm1REhno -1095613113 1880217547 true -1156414406 -1413976201767988315 -2912836658388891467 -29057.254 -1.20911702821499E8 M4zNCOkwCe 949769796994980973.537665807 +-8949926738335123049 2024-06-12 2024-06-09T18:08:33 Ft2pvZ6T5TC1ZQ 726693407 1680009422 false 1702783198 5982844624594189254 8224253281089437847 29289.305 -2.69555261484274E8 cgnHTaMWNK 949492123411943781.254979170 +-8950226884639149499 2023-10-09 2024-08-25T21:41:55 0BWxWfYjh7f9UWPQIey -984709736 -675706954 false 492715494 3050365077001017883 6429135385278575095 18147.998 2.008827283091958E9 8Cc1sxr97j 272673280184806570.193213444 +-8950286635889741379 2024-04-02 2024-06-09T23:28:33 5PmS6Xy6U6WF3 344481308 -2100830607 false \N 2915761279962821119 -5722724141195285332 -19740.295 1.309735809206345E9 o0r31ojaq6 \N +-8950630805304769750 2024-06-11 2024-05-29T21:14:10 uTb8g5z -1351660853 226861040 true -1124867533 3252962365513188310 -3380099474645160921 -18340.78 -6.83169974387326E8 1ep7Ao73Dh 771843973471553521.543657253 +-8950876130877956071 2024-02-22 2024-02-08T05:36:15 D45IKlqoGD7 -995428151 160015106 true 759243267 -5751627063605660299 -5550899162944188109 -5998.138 8.80377365476102E8 aG2cjcbl28 -945473954613412970.375055117 +-8951113814890780985 2024-02-26 2024-05-08T04:29:16 YLgLm0o -604798292 -1494473636 false -194682571 -8589013583997276306 7359481726620659370 \N -2.49789062444776E8 b37IlZnxzc 118325618691376543.234978727 +-8951191954483700549 2024-05-05 2024-08-22T20:39:28 awNq -734605192 -597427995 false 2100292666 -6635331467197450847 -1513551954363384231 9345.857 \N xl0DfXFF4H -619221691992919933.323363780 +-8951217024485312414 2023-09-28 2023-12-02T13:24:55 OL 442736080 -44890300 false 1445535403 -4967921146617113513 -3393442293281935500 -9104.851 -1.353590779193073E9 \N 315528513563434211.431944325 +-8951501386190606576 2024-06-04 2023-09-14T03:43:10 fiDK -624421845 -1853366742 true 1700809822 -6021617166107908178 -8903027872407306374 30668.918 \N n0jqa3uYeq -450842283845577866.728041993 +-8951559222433291234 2024-08-19 2024-03-06T18:25:41 ktCD9V1K2Cyul -946455012 69944569 false \N -1215080679840105042 -41716877597536857 -31930.174 -1.506746023680198E9 DlQT8OMtnj 197417299235837911.497532225 +-8951812427475677810 2024-08-30 2024-03-12T20:49:07 W 556987327 \N false 770113381 -4693058377550891025 
-7606026856714143121 -21783.611 -6.58904866323591E8 6m34fGZWaN 385280853805579692.576633613 +-8951956857131552861 2024-02-20 2023-10-24T06:17:04 F3or -1002359009 -1167592864 true -591489548 4754404938563266454 3019636633469825822 32061.822 \N WH9qUCYW7B 189896958780303654.254222107 +-8952109265321325401 2023-09-26 2024-08-02T14:22:44 fBf8C 427777899 -1968214486 true 1654746497 -4916898881415454247 -2376508209692152215 21211.275 4.491678647021E8 j5xYPsN8Up 253772912749123316.876548150 +-8952196371596223143 2024-04-26 2023-12-29T19:22:08 oBL4zaLt 494282532 -517140531 false 1250338921 -442127923650639958 \N 22824.91 -5.67034664806796E8 2QkjXqfYOU 3252932737199874.483411234 +-8952509173754235527 2023-09-07 2024-07-17T20:58:50 RcS1h 1438988956 385072491 false -838345202 -4471323211982422936 3954412996221970059 6889.432 \N nCch4CJcSP -379191342784148963.753225703 +-8952539831505078902 2024-06-11 2023-12-10T23:20:54 jUY5tJXqMjyoQq -600094008 2018030586 true 1358374472 8594228002331286275 -8634290941586540646 21095.01 1.354943059589552E9 R2bZOvQ9kH 837195810010999505.971275085 +-8954457476991322735 2024-08-03 2023-09-09T17:57:09 f0j 395634019 1921944874 false 53552618 8351327455131643907 -3843240826716217808 23945.53 -6.95557492225578E8 5sruN8Zh6t -656045453780206752.676650841 +-8954694546552917865 2024-07-16 2023-09-06T01:06:23 T 54385454 \N false 1859536905 4436527087647890794 -345501131392776709 334.31244 3.58161060091938E8 1oGoHwElRn -506922575414917832.725266381 +-8954777276678712635 2023-11-08 2024-04-01T17:53:51 LzFuamD -1088206908 718444578 true -835964376 -635378380861891948 1100921401545426739 12550.7 -1.971737664538571E9 \N 377926518540940371.161140252 +-8956659737209443063 2024-03-10 2023-11-08T09:14:18 nkBGcOWB7fEyGwIS2 -355445079 -454938194 false \N 3297658736728682938 -1641961462888145988 19829.414 1.650171726219375E9 NOYkpj4tIX -941324814456125552.913138706 +-8956945913346279435 2023-10-23 2024-02-10T22:38:37 uHfr4F2VvtxqxNOhpe 1149069423 -1944938730 true 311510118 -5317678965237051782 740680282619379768 -27869.434 -1.741604648677446E9 OQ72BSJlmd 111114895169831101.831071015 +-8957027093289395199 2024-08-15 2024-05-23T04:12:25 1973328165 900192612 false 144540115 -8534056977637707987 -8151026626138665880 26251.127 7.93811481586605E8 \N 501654684429860203.181234603 +-8957058404729399996 2023-10-14 2023-10-20T19:06:26 1323498750 656148573 true -1601218479 2779146592571907906 -308852234719335969 -29469.508 7.0855384890739E8 NqJy4ToiIo 972726547441494008.498832960 +-8957224174357490745 2023-12-08 2024-07-10T09:36:27 61ZyUivBnVQ 1471399086 1579326991 true 1874361828 8709532020905742617 \N -25854.629 2.820849660011E8 r8235tcxqX 80411020199885015.447549072 +-8958531516162698320 2023-12-07 2024-08-22T01:53:53 IXoFo7sad1Ak -865321138 63120284 false -1876510738 2485463654068750080 8865211780884996740 -4470.781 \N QRBbnbmxiC 159612565196035373.829412333 +-8958691583129790632 2024-06-20 2024-03-09T11:55:25 Wb0ejzWPLaE -2055407234 1902492849 true -1447915296 -3542534308272587851 -5485131200826703756 7595.8135 1.572330881055656E9 GBbe4A27Wh -388245129934688097.129929613 +-8958743368866860238 2023-10-25 2023-11-16T00:52:39 aJ9Yvek6u2ISXSDnq5Ga -1467175364 -774604082 false -1422737187 5939286212276015706 9011794811776387647 -22828.531 -7.82186015165673E8 7rAA38JqzQ \N +-8958877944656855616 2024-03-31 2024-05-08T15:57:26 QuhP -874263104 565225577 false -1227692160 -5371348272502892123 2775801281916656666 -1719.333 2.90998712238745E8 Qk5qZrenMK 963041310688580015.699660810 +-8959051056446789858 
2024-05-29 2024-03-28T00:31:18 R3Thd1OZvyT 1539361311 544473650 false -900537464 -7011037964506035198 3575887831786196316 -18592.99 1.95350771822493E9 yFvIB61vBj -73067294655942992.593335600 +-8959246387269488711 2024-02-06 2024-05-28T17:19:20 KQS 1974479133 2075160117 false -288048394 1576097098077744750 8940999280537249488 -22069.307 4.4900053081482E8 1zgc99pjG4 -95336916722070692.759940939 +-8959686841045395674 2024-04-27 2024-01-12T10:14:33 BQoMC776CamF5g8SW8P -1690065837 1811739302 true 2057111006 6096807514824662318 5543707539143442315 -30518.168 \N a17oQniB7x 443680711300665313.214198666 +-8959800448412892207 2024-04-17 2024-07-06T20:06:10 Wu9cQEpxVfZHQtNd6cn -1201154009 846933879 false -259145782 -8289290294593264662 6397363546792807531 -9385.366 1.904572266827205E9 rxzDR8uKOP -381063347072003781.258188535 +-8959890246366259437 2023-10-06 2023-12-15T10:47:22 748067469 -256110651 false 559279491 -6958558254356524817 1662549151360891722 -13042.255 -6720451.413929 k0oEoctua9 778243461462816557.626205643 +-8959969702351075930 2024-08-05 2023-10-13T07:45:58 SFXdbMusWn8t 1534089394 -626024709 false 396018625 5268729854537154580 1441944086461318419 -19204.96 -7.10760303196184E8 MDy6h5h5RZ -88486367555743596.269389458 +-8960091668139875020 2024-01-17 2024-01-27T17:14:59 JvLmis -770505324 766237464 \N 1539928046 4830770741130901887 2361379458184558553 -7956.184 -1.263693006321944E9 aeIvSLCb3D -792901881442834884.168662407 +-8960404234803073688 2023-10-12 2024-01-31T02:14:06 BLu05fygtsm3w70crIQg 315289399 1508446763 false 2023142237 6893046307241412551 -3395348947311237529 25687.72 1.452709620741684E9 yW6Olg4FPA -238927193336195784.691694969 +-8960545981437042120 2023-12-08 2023-10-10T03:11:31 CLZcL6C 1857491787 -1684941809 false -1286556539 7438382888154586511 -8401784871292319304 -14616.397 -1.872931551734148E9 7IK1BMjVus 893757242792265814.271756617 +-8960565229325423018 2024-04-20 2024-01-31T05:59:38 c9PVwQUIpxM103 952859454 1669458034 true -523366731 -2486426372453175187 \N -26684.46 1.334481120577836E9 7r1k58ASHl -148834088043726917.800403374 +-8960706107638255363 2024-06-05 2023-09-28T04:04:10 873836118 -217106121 false 1290241456 \N 6377569855306720936 -21199.28 -1.497037699995749E9 zL0FUtIV43 751951770202018017.873953637 +-8960973221974349152 2024-03-22 2024-01-11T05:02:19 vPM7HgMcJOr 840057693 -1299750341 true -490233474 6004198202512526533 -4197261122596858777 23047.656 -5.30647739444598E8 b4WN8xrEYJ 665344814728825273.466813932 +-8961191976587986902 2023-12-28 2023-09-29T10:24 Drtw -917650158 -1909360359 true -1110049785 -6227352269307618015 9064765933272487403 -14872.205 3.99265498768044E8 6EjagKgyMe -487242414493366827.504866759 +-8961240197648596860 2024-03-14 2023-11-03T18:09:49 UMNdO5jYm2NGO7da8p \N 666142333 false -1046645839 -5244252576687805290 3401172520434610698 4079.2534 -1.721060435570848E9 pxZyP8G2PY -919672977975608362.569432818 +-8961521488747785313 2024-06-05 2024-01-08T07:51:43 BLZ 1879427430 484852961 false 1297325931 7233817953259019773 -3941927235355470804 8613.127 -1.04332760147118E8 G7nDNe9SpS -577444347928369605.152257551 +-8961954286796108121 2024-01-11 2024-03-08T15:28:04 XXGW4LAYPOlp 462305815 -493995816 true -1059265592 -8950440100766321162 8394317982763416647 27979.26 -1.442590005102842E9 cAkb30tFVJ -994230574314759107.345459098 +-8962269201290316103 2024-04-16 2023-10-03T22:42:45 qBBLgyjmISM03 -430961540 1260592572 true -876881718 -8246141888658329311 -8391599984420349781 -10172.559 -4.57182836923257E8 JADCsCdG0q 543689948118406090.708472942 
+-8963288959237674028 2023-11-08 2024-07-07T12:17:32 ZzSLTpGX35StlZXiuyY -312370121 2053197034 true 1581643551 1618228155038434473 -4891322373779138073 -27547.086 -1.077770484914012E9 zCBA73GQtd \N +-8963580341803205332 2023-09-16 2023-11-22T07:54:15 120533362 1148747047 true -479819797 3778150307376873113 -1440058667496913490 -19412.377 1.304371161503668E9 \N -561944252453573860.305786128 +-8964056528225708602 2023-10-04 2024-04-14T03:02:35 \N -814872316 -1503643431 true -15490008 \N 5740071226993900052 4523.9717 1.687469788076775E9 cyhR9diP6D 391517110070154579.250522156 +-8964330206643315884 2024-07-21 2023-10-25T22:05:48 hU8HgAF8Z5Ja 1145391840 -1792602783 true -1520771075 6699881989607945700 5962195639846164208 26281.457 7.21102377582775E8 wPR7KFi2aZ 132784419927900888.374113572 +-8964625472929640629 2023-10-31 2023-12-25T16:59:30 IKy6g9Xjas -1967369374 734943068 false 2028674705 2563113492415012650 -3177278405856645503 -2866.2468 -6.13493201615458E8 \N 334142731583661168.377270169 +-8964990116742194196 2023-10-22 2024-07-21T07:49:24 65JrXBiLumzku3gCR -1550062678 -1504787895 false 1305071967 935565229393016257 3883805278593941636 -21167.963 -1.259879891997872E9 wKtHdhSU5w 229735372003943306.959020000 +-8965276946297723057 2023-11-07 2024-02-19T23:37:38 py -214516886 -1877350715 true 606038563 6522195749753024555 4356668556309873511 -3006.5403 -4.11237071697208E8 41Y2Cb2dE3 222581334920412627.809475691 +-8965814212274489947 2024-04-26 2024-03-08T17:55 wbc28QApdsRAuRfLz -1291733738 157289952 false -447261799 3859401142280869877 -8854366794843865926 -22723.184 1.555107543939481E9 xbFVB2kcGq 930307693590758456.214300579 +-8965837391332904233 2024-05-10 2024-01-09T14:00:03 55ciTUvcKKjxmUIs0 338283743 1751064776 true -1066256754 -1299901230977489904 7361120819493331287 19040.977 -1.728475504265923E9 WZPqZ5XDLV 83007233237976640.141873536 +-8966104091491837851 2023-10-06 2024-04-30T09:13:15 oov7m4m98 -195498522 -1766812264 false 4728363 \N -5151530522603309858 23152.398 -2.051067710678678E9 TfbWCzEmb5 -764689947766926685.717124860 +-8966563184429029194 2024-04-13 2023-09-19T20:14:26 Fr \N -858047289 true -98705353 8747120517105696912 8556336947811610341 -13206.981 -2.081670749194105E9 GKfYuQpmdB 549147430745551218.903477690 +-8966664582982039476 2024-01-15 2023-09-14T17:47:28 rx 1512071195 1177213632 true 2087832835 -8952264352414763770 2805662915352630119 2881.0754 8.4972388003388E7 O3nnSnLmHE -372208391541817650.422974382 +-8967297438835973847 2024-01-21 2024-05-17T04:46:36 2082434929 1168238399 true 1766380991 -5934842051115830938 7991981268018805312 -9251.952 -1.284761314487738E9 z0bBHdlIIC -37809664499639272.292252746 +-8967694746368530349 2024-06-08 2024-06-04T15:47:59 RPRmhXMCRWuV615G2 -1200764453 122533617 true 658838231 4532535721538049357 2249405588833082247 -9277.553 \N WkS2cKUGve -346664875119036191.110047050 +-8967702675099763536 2023-10-16 2024-02-24T01:49:49 1wRCuG3ELBGIxwrNHP -1278237740 1349811441 false 1621529588 -8937426796689008185 8910415585350493426 -26564.576 -7.45806152685386E8 A4ukOzmG3B -760588056164269291.493523629 +-8967739244713952878 2024-03-28 2024-08-12T07:42:49 Uj 451657776 989234096 true 184469934 7867892736415902682 -7379380300171052924 15300.082 -1.856803673383419E9 Oo1dMKOQUL -661764347807442590.591811726 +-8967919975435422064 2024-04-03 2024-07-13T12:42:32 R75Y -1028305106 -309606751 true -727007013 3092147143131557597 \N -20619.773 -5.11000815486948E8 rBNHjVBJjt 627381277582986113.112824119 +-8968133676936211592 2024-05-25 2024-05-17T12:24:28 x 
728200886 -1866733631 false -1406213760 177459067749137850 -7068124839208170723 21129.832 1.296393770550651E9 \N -131797347140467710.510993437 +-8968253008696818263 2024-04-27 2023-09-15T06:57:29 pqRg -436800019 -1677524614 false -1664642603 -8293316356940541893 -7893429245555133082 -22172.574 -5.6639814365838E8 \N -939079699045149915.144967376 +-8968421868221139173 2023-09-29 2024-02-02T02:01:48 q -542940981 \N false -1095689784 -3413845385415955309 -3442984555781742172 10088.439 1.135095840952524E9 7cJABPdNpa 57537380218997791.655526498 +-8968758343659445193 2023-12-11 2024-03-24T20:09:27 t -122832487 -1097984901 true -588395652 3014797105852019578 -1192247892637347700 -2333.5537 1.09708312368122E9 B27ftGt3SH -348856802712425226.616902875 +-8969192619423262993 2024-07-11 2024-02-19T21:54:59 gUhpegc0 1779095584 1488250875 false -938283355 -1744149396739911379 -5407356536292011323 \N 5.64069713112435E8 CyWNT83nC9 -202084959488172055.209952775 +-8969390927621048252 2024-06-04 2024-06-12T07:30:46 SIYPUyk30pQAuhPQ 1538757617 236496109 false 1497149434 5769674884858393075 6461287562396096952 -14881.92 9.05125458035156E8 RegnK2dAxi \N +-8969573695825656652 2024-02-21 2023-09-20T09:01:06 zp4J2FDnOL8VVKb 909763056 -933279195 false -820741653 -524004670335358918 -8551817343717201369 -14493.239 -7.39887218506436E8 \N -515653189133800556.837291426 +-8970090422647561236 2024-03-15 2024-04-07T18:52:19 OnG 94947325 -1938912465 false -2125508348 -6287717821739999159 3834217632879489197 -791.8398 -3.74969391031275E8 EdAqyXZ9lw 20433577886842399.485923473 +-8970140784109889821 2023-09-20 2024-07-13T02:39:18 9bSNzLkkEHqPDizOhE 1538225105 628962089 false 2079995655 6858712255827221621 -4189202451803889286 -3977.8682 -1.875932248690565E9 dYQa8HLhlM 127902582682496548.451905690 +-8970204183641997375 2024-03-21 2023-09-14T03:38:06 \N -1466091905 1495373996 false -1317092795 -131629123538391201 7372993386406577174 918.00916 1.955425124731413E9 O4yHEwlb4S 233729631639770097.388301771 +-8970521966785769492 2024-03-28 2024-01-08T15:34:26 PbqlgGxz3bV -954273668 1984946825 false -341619009 -4221170151990797885 642581178638509341 1179.3179 -1.612495057230553E9 6cWnY5Yt09 80379796241136137.467786221 +-8970554427988747028 2024-01-01 2023-12-20T03:07:42 qfbe1d90P 1499805508 -2131223807 true 1683768994 -1702515155238895719 -5094926626596285481 \N 9.53423071179961E8 Yok5y4DuqM 670866301564980815.641332533 +-8971123938556616166 2023-10-27 2023-12-20T05:13:08 q67U0yS18 -859299432 231800452 false 1123784257 -6265147442825394834 -2320013053705640351 -12365.88 -3.0962577763491E7 amfIhyxJZu -523782248321933666.448718515 +-8971198866844534477 2023-12-06 2023-11-23T11:37:05 3n8s7zYh -1236007344 -504613502 true 2062553255 4356088167217579538 2647157784137649270 -8177.801 1.037115812565683E9 yG0fYoEuCa -512047146664332839.867248214 +-8971281601545986269 2024-02-16 2024-08-22T18:42:26 xUNoWtzQk7wL25bgJJEN -463429977 665436720 true -2116604246 -3587059617525829051 -7913029619983089489 25205.764 -6.1016267764079E8 hoHgaYvMDL 744518528159234596.975990060 +-8971412509270299719 2024-07-11 2024-02-12T05:01:55 Bnnf61wG1B 1034365127 -1310398618 true -1970338377 -4317544038515874596 7251220068798692219 -16409.75 -1.890498441881737E9 8ynBPgsaKw 77795189114757406.437616162 +-8971565229669979926 2024-06-08 2024-03-15T11:41:03 mhFQEwGTVvP -957447129 -454430491 false 643094503 \N 2855335685514751217 -9184.465 -3.19399623752301E8 k2a4HiHo7B 510526117320253070.260337382 +-8971864120302375132 2023-12-20 2023-10-24T18:29:41 5DOnv7ng -1070108455 
762637746 false 480373271 -6104258961944481178 -8340724985453241369 -31639.158 -7.61516728061985E8 qmeaBwvAhM 869843548062738805.521195933 +-8972747639323784023 2023-10-08 2024-04-25T00:16:27 628799357 -1692433313 true \N -2898816399403821620 -7006270713146685970 -16479.635 -6.55061472003438E8 \N -853349074195552189.619074020 +-8972846145602525769 2023-11-09 2023-11-12T22:45:24 yQrsDhAkxr 1296921618 284651418 false 128586035 8359088310886374880 6451571891645210509 -21454.777 9.72201506974655E8 JYrEFb43gq 391576436300388168.266963741 +-8973076127755556760 2024-06-29 2024-04-18T22:21:41 eaw0qgH8ZA2aw -1731847811 459503070 false -1264836634 805134622104278791 -291122487548869874 24978.77 -3.13795828991627E8 Xqj3dZ7ucu 629794482136918847.370794017 +-8973191473689994876 2023-12-12 2024-07-15T15:06:34 1hguhHHVP3OM6s0mJyG -1676851062 -1767492997 false 244509116 310391275661355342 -8564598379835963460 -31756.877 6.95860436873212E8 hduRUisLrZ -703667700842676057.731075705 +-8973728993520106846 2024-07-15 2024-07-19T14:19:37 EVMXpJFNGwFt6sQrd 309056760 1142687806 false 1642788297 \N 6079261109057654243 8949.095 -1.478621119407506E9 50bn33bdUt -759353907739978024.443656522 +-8973807910577871697 2023-09-27 2023-10-14T15:20:25 xPEQGlzoXcSowb -1498654206 879072669 true -612171821 -441015474071296604 -3031091135961939627 -22064.662 1.795727711642202E9 ZO6DuCLYHp 297065916130859304.327152370 +-8973992180832189228 2024-02-27 2023-09-24T15:41:59 5jtYdv1 1966922660 242816799 true -800461734 -9148765401057624237 -6304842749250756354 10228.125 -3.31110927058325E8 f2KkiWg8yG 188070580483481562.782430798 +-8975147489864609692 2024-04-06 2023-09-06T15:48:41 Xar873YTGW0XQlUuT4y -122947015 -1425436394 true 1391615800 -6912237500344516726 5047878393022927838 30291.516 -2.109762869742017E9 ZoyaHzOJuo 388599282301597655.756656057 +-8975207747247653078 2024-05-06 2024-05-11T13:04:40 hgPV7pIeV0l -1551607399 1881965764 true -1525667202 \N 205617435308321779 -16948.666 8.64683197158314E8 XPk4TRBVnH 676346956193397077.297551523 +-8975325632650873153 2024-03-10 2023-10-30T09:14:09 Ave1NaZgC 1658789141 913004121 true 1221685574 5915524812115591019 -8858590278311595654 -30956.172 -9.19272872766193E8 wRqTcZBHDZ 492099154587559757.479098075 +-8975446778862724878 2023-09-06 2024-06-19T10:45:58 HBE 684137940 -1228208016 false -495018949 4773868430557790194 -336124179908390509 -13344.473 1.153229156026653E9 QzhEvgizUT 794773587570358395.964031473 +-8975699497034205367 2023-10-01 2023-11-03T09:10:59 8ZT2cpgR 1223845191 640655711 true -1188922933 -2205324998831928681 8495488307557341861 -13175.551 -9.4244516049595E8 1ixlhBpklf 95989336980624320.245152285 +-8975783419330902230 2024-02-27 2024-07-12T06:17:23 XdSJwxN2lV 154108459 1000638737 true 1568546353 -7359262278974779890 -5219640710791072696 7501.8784 -1.80124356160925E8 5GHU6V8UqT -417687414602779533.671779400 +-8976327861478760987 2024-06-01 2024-05-29T22:48:02 36 1322346295 918838744 true 1655286700 -4054536493823821088 1831003631430461594 11487.776 -1.188119143220899E9 z0MvIyXFa6 726201600320551987.119918412 +-8976379948322963852 2024-02-13 2024-03-18T02:28:45 npkhSu -555166236 119247899 \N 1584159763 -8473421593021331090 -5636494174379393254 21394.557 -4.7984881216161E8 jv6XIYB3vw 467365137300544363.500157849 +-8976563457678384345 2023-11-18 2024-04-13T04:34:36 xgkG -1443325234 139573658 true 1513600946 7548804664055734441 -5132263406103499061 -7564.9688 2.92014195113034E8 u99xHP1Lp7 -415289088065796002.587126457 +-8976654626133045370 2024-01-18 2024-07-20T19:30:42 K1 
-1217808599 -1913513325 true -1682741600 \N 5228915836375516308 -19916.564 1.054953006971154E9 LAnxROAPsR 367877215232610271.726566884 +-8976718262353948973 2023-09-10 2023-10-12T05:48:14 Iy44ziW9BwaMx0PH 791993838 28733356 false 2054067770 1645736657710383211 6693791236847222490 20955.605 5.89654345950449E8 2pn9KMwRUa -993557571427435088.877343164 +-8976860983679505396 2024-03-17 2024-03-02T20:06:05 OCPXBCyHBbGfG -198125237 -1614558483 true -1507129986 \N -8794148939261099447 8320.442 6.69344495278656E8 obYrebFYos -471335798279761895.293480011 +-8977666079799355659 2023-11-14 2023-09-25T19:44:31 XmY5LjVbgUgk5hOmV 1909151449 -777140752 false 477628960 5352743384885273070 1725305912836974548 -26715.312 1.881319587919738E9 QbXRyTZMyD 599030298153884901.584782470 +-8977969711645231767 2023-11-18 2023-12-05T14:32:13 xMr 1471377031 1247880198 true -660954150 -1135411850287043033 -1235068399818702132 312.8823 4.5485903496403E7 KOrqkPV7Id \N +-8979012385316062182 2024-04-12 2024-02-16T05:28 y 1385051378 1771269411 false -2011517411 1622409969597832945 -4468942483232934476 -28514.543 9.40676733929829E8 6AruAedEtt -488793806108952352.626784793 +-8979547300679871479 2024-03-15 2024-06-14T22:43:23 8l8A -107923247 1958398308 true -946786380 -5625592632721310423 7731769283850742683 -12785.037 9.28869199284963E8 G46E7vtkyV 435571103687161735.713464726 +-8979604822366351694 2023-11-25 2024-03-16T12:46:07 nTYn0yhl9FnHTOXLSW 196164768 662794874 \N -288348309 2704190407845071720 -4109120259611966161 \N 8.33437185371055E8 \N 851156194572328518.384102550 +-8979991491036316870 2024-07-09 2024-08-20T16:05:32 52h3yDHQ -1362586703 738123420 true -1981168208 4704359062282523976 32624651041649303 \N 9.13024133794728E8 mwJmfVKNaq -763651507951260883.583622588 +-8980432386045724859 2024-05-23 2023-12-25T10:32:11 OrZVBrVzgki 902219262 310484213 true -1076789568 -2598004690896005109 -8684579711011921897 -23982.15 1.498266177876805E9 CEFy42TBJy -662320631659495435.608687290 +-8980655894367520779 2024-07-19 2024-01-26T04:05:48 4NKnplKzIf918xDH53e \N 401620409 true -378563404 -4228064291582049358 -6368864878587472958 3085.2412 \N YninGiDQ1h -799510179760058278.317454962 +-8980791835050579570 2024-06-28 2024-02-04T17:30:50 \N 1691524376 -1639859098 true -1694166423 393175152997244334 -8508885884897529082 -16941.035 \N qsOTUFhxf3 815544288964088907.795629089 +-8981096349414991852 2024-05-12 2024-06-09T16:18:38 0t8VtVnbvXBgCwE 321868849 -1430893146 false 1782397250 -7965513303808245688 -6827599083946310650 14912.545 5.72233633354435E8 mm8oHKKJTw 550913711397277219.836361429 +-8981224764318820529 2024-07-17 2024-07-09T14:41:25 CJYee -1803934786 -1222266885 false -222046707 -2553195978001416643 1635483890324794369 -18477.69 1.235693859268241E9 2vJH30QLWK 342863195535065588.233011703 +-8981624212135011794 2023-11-23 2023-09-15T12:05:24 eP5ZdRH3KbU7F -1944063153 -49839645 false -1014610493 -8121273624969808288 -425084292273773916 -14841.657 4.35291065156165E8 4rjOTltEMy 786186029356859620.646735804 +-8981813017629249592 2024-01-26 2024-03-25T10:14:29 J7zz -1689505539 -2116777241 false 1692682007 \N -2067670411389004862 -17670.42 -7.30504844213959E8 apBnF1Xdxx 187413932931632151.668888421 +-8981958063200014016 2024-02-28 2024-01-14T19:23:37 1xe4 -415342554 \N true 2051336433 3151928565926710078 -7506869522669317887 -29595.014 1.596907294745378E9 YGcEIO0M7x -304700308574711011.733126355 +-8981970094919394192 2023-10-14 2024-05-16T08:55:17 W3xOZi8nast -44096240 1516821751 false 439242760 -1149978919712260922 -8363517333425479031 
24660.053 -9.16834448955342E8 QOPwrufBok 275951054152011127.448724348 +-8983019876183258326 2023-10-02 2023-12-21T20:48:44 97ouZ 1379218827 1418608103 false -1467312126 1465468222654653364 892351382584512618 -32342.217 1.86805054065367E9 TDTX1K8Y6M -11724595128623650.273741812 +-8983523398206234220 2023-12-25 2023-11-11T13:28:45 FKz -538119949 -1797348154 false 1519960197 5951956957921025826 -2240574991553139252 \N -9.84893695114572E8 zQl3UOLVjG 769595635449777008.874816451 +-8983884961056069539 2024-05-28 2024-02-23T03:23:13 \N -642616739 2118420198 false -510875999 5631165474267554429 7799907845236595315 -25391.729 -1.955204709592943E9 uax29UbFbQ -640639687931494519.981327171 +-8984709566443483391 2024-06-27 2023-12-07T13:33:02 shxjaEAbv -1907828930 -2042886025 false -429574543 5294699317174544430 -364834564829976808 -11581.984 1.207100142391857E9 0lpjKrFot9 -468784222614763336.601966826 +-8984892195866915757 2024-08-01 2023-11-30T03:20:58 DwbBMDEhjDwkXHxLvU -5150396 -356673809 true 2025266590 1128383269111164867 5372214940350678945 -11469.627 1.578294093628584E9 7bbf9M6gLQ -167733225748299860.231635370 +-8984965550195327961 2024-07-18 2023-10-10T18:59:44 tb7Nzf7Cor 339032546 -543868610 false 633610026 -1523099621734952042 \N 4781.0483 -5.64263608083484E8 F2CGc88eRQ 128282515124423477.647142019 +-8985052552453301541 2023-12-09 2024-02-27T16:48:58 600677088 762009168 false 474073538 -2727794606610526348 3928290700583397066 \N -1.940587080264087E9 pR0jF8jWKO 880236594754175392.458929083 +-8985355944307787984 2024-05-11 2024-08-15T04:36:44 wNJyAApVSVbzqJ 1888307921 205258372 false -122697141 -354976135661576942 6558205168025134453 \N 1.225872296325974E9 FhVwCqkCeM 845875057463256922.401642014 +-8985818132126380179 2024-05-02 2023-10-21T04:27:50 mCUm2zB -131810754 1794335996 false 1313861732 6217731354868004765 2948738127343894452 -12328.709 -1.472712377621225E9 5XCmuxiSFg 692078940093538752.211820673 +-8986792963151014196 2024-06-16 2024-08-24T23:44:10 w4IzDVbLcPuni8Zhcd -1893383212 1559624495 true -768435281 302346794382843059 -6364098631644280956 3614.6926 -1.636434440462545E9 q8h1YXbg2j 135565484978612377.394226890 +-8986921350492434166 2024-07-22 2023-09-28T11:15:05 \N -1817896738 -1270118931 false -1424883844 5768849886758338015 4938042299931924965 15341.012 -1.952997897868809E9 \N 480974125601717517.457801178 +-8987031853056823074 2023-11-22 2023-11-27T01:05:33 nlftahlRJ7jwbxYV -710223188 481990480 false 100963118 4085542585161486803 -1405300385313344597 14032.346 6.38288762556077E8 jkk6VmPNtZ 63556993893249095.970624176 +-8987153662239928213 2024-03-08 2024-01-23T05:21:03 jV7d6K -965094059 372283479 false \N -9195891986852827861 -492952111314536430 -5242.0864 -5.1817671085664E7 yCrCcPfB94 130240366939118053.470287800 +-8987234581863684994 2024-05-13 2023-10-07T22:09:48 fdTxit -1810052375 1224182206 false 1975529678 3305741436260036470 587246113386442611 -19828.76 -9.39109400064495E8 xEBztxtUfE 102206153588006018.338764297 +-8987254651203895480 2024-03-15 2024-08-05T12:40:07 7ZS3jLmSkZ -625380641 -534248979 false 1951267953 -3513715733658859655 6645737727615213652 -6454.0415 1.034039148373813E9 ZmRkR3itWN 472033400590951763.130703771 +-8987495698593150718 2024-06-03 2024-05-18T09:57:41 UUcyB3tIqKLyApaAPN 820471581 -1740828922 true 537126485 -3045178225060484527 -66118194956832427 \N 1.671368773403436E9 1SDHIZkUoJ 981514074533269232.548519290 +-8988345952146710464 2024-06-21 2024-08-27T03:55:03 rF -647655295 907555041 true -1416908230 -7680823138687269824 \N 20393.627 
1.783575183960558E9 RAPcpbI58m -409884091487293438.912350254 +-8988406496864558950 2024-04-26 2024-07-15T21:51:49 jHGA 718641737 \N false -496223325 5030949192607327503 3773192679294417607 689.9825 1.362795660187393E9 ehDWIDHwSS -763482048738984424.851428763 +-8988582502794962529 2024-06-07 2024-03-08T12:52:25 K 1374193639 -15718608 false 678762875 -2433151465389305971 -5967214057836881059 -22030.68 1.023753557039859E9 ooQ7TcIsUn \N +-8989539988203129814 2023-12-10 2023-10-30T20:04:52 UTO -678853621 1625321451 \N -1966292004 -6420816431207672822 -8015295447794481128 1869.5784 -1.175741942457983E9 R6YU2jnNgJ -588242759642107355.626079034 +-8989943460445541427 2024-01-29 2024-08-18T03:11:53 yRMLqHrz -1506580901 -1025360231 false \N 5697340964093162808 3766531453228309066 -15490.98 7.35389869214917E8 dMnQiEFNvq -121186567703736083.401004878 +-8990451206848372638 2024-03-03 2023-11-24T05:54:43 ij2tXYwEq -865216938 -1484688601 true 1096277407 9204852824692673219 -511541433312870514 -1426.709 -1.000860170991103E9 5SuOqkqgew -228456824006147894.582217490 +-8990995905960584364 2023-12-18 2024-01-28T20:49:42 oB -1756953458 1267494641 \N -1532768281 6596269732694045708 5101795570251083099 8023.632 -6.24377524745266E8 yAPZYNrVTS 653573566863764544.100024030 +-8991276358936227514 2024-08-21 2024-07-02T14:58:56 -266619938 -1071689550 true 534971333 -7464140226255634976 6810796551979274557 -4378.5825 2.120008487142971E9 5bhlPPZ8Lt -124888724347091660.790824979 +-8992288234488706251 2024-01-20 2024-03-01T14:01:44 bdLF4GeOa4wOJU2 1271294915 1935589144 true -1133101591 -8872748434110421434 -4983491189008896657 -3952.2278 -7.47769745972213E8 X0g2e5RPyb 953411231224949242.398548466 +-8992462403092724282 2024-08-26 2023-09-23T12:16:43 Inrl8tEYihglF 1751884251 -1798726783 true -2143741766 \N 3083571347579507781 -28162.63 2.8807122711765E7 mLnxGNNSYf -62451904304088400.146758212 +-8992676779375488500 2023-11-03 2023-10-06T00:22:03 R4CB1 -1929291758 1949275159 false -1522713821 -2688910305207397555 4694593961920152464 -25308.05 -1.12189413660441E8 Sp3tydM1Hf -993631993793718491.160234411 +-8992936293953727018 2024-02-17 2024-03-18T14:51:44 jG0Y9Tqe9l0fAeRypO 927163056 -809615425 false -705407732 -6860011828580340818 -2001455519826772317 3554.8154 2.93268357190258E8 61fwtPnkt3 239644728340207645.781198488 +-8994296538909364482 2024-04-07 2024-06-18T03:18:29 xokH9wE3 907853201 -2091673805 true -1654846868 -8158247769378026353 5477533702986229523 30236.23 -1.766754609694937E9 7o4ti0p4mo -751701119605685343.505381973 +-8994363443521471659 2024-01-17 2024-05-19T15:35 1qgMr0OZzmaL9ow82xE 868056946 -310529849 true 1759331001 7340696775367374620 -624653540182614713 \N \N eePJoU72V0 778360565947703184.650898111 +-8994442591183063779 2024-06-12 2024-06-24T20:04:29 0ULWRw 1429682487 1666901099 true 285799945 2742162147634171565 8232661814631866915 22533.75 1.5287294561732E9 M86qHeD5px -769561653152917780.718236301 +-8994494390069699849 2024-05-19 2023-10-12T01:35:09 dEwJmsJvooG4 1055091747 -1617349000 true -1200881259 8256243529538775677 -6258767680493849458 -3331.468 8.94371424438567E8 8Cy20gA81u 694315471794170764.913719121 +-8994506559838815476 2024-01-16 2023-12-20T13:21:15 6G -486251692 1936224797 true 1323891873 -3165640186385163449 -3670857422036421067 -14408.524 -1.800413252550306E9 kK1JVd5kBf -387819067175084033.175876879 +-8995399680362026463 2023-12-15 2024-06-13T14:56:50 kprQ9Ba 1741394248 1126401488 true 284798490 -1380768999733981681 6490005571405519272 16129.6 -9.32803193419174E8 bRVweRMUV3 
-157683776231203892.240828631 +-8995598685024368462 2024-08-12 2023-12-11T15:50:56 kfg6OrNjHP3R \N -1265734671 false -1651298585 -411939055809456142 807110321052394986 1855.8549 1.426844584177333E9 6Qa3WRA2Hq -48971545237271695.975482849 +-8995983603281384965 2023-12-08 2023-12-12T17:41:57 WsGS5ovEuS 865359733 1137556506 true 1495698111 -6927830346769245120 4495624531776446034 -23825.703 -4.65027263616728E8 8XPhhS3NBi -633174096327976440.394921049 +-8996259970456159033 2024-04-07 2024-08-02T22:46:52 9bEHNnPFezo -121759796 -2129612520 true 1297291328 9155709717757054070 -677349201045070946 29305.766 1.799164887491906E9 DP4KdAXgbs \N +-8996444529579196867 2023-11-11 2024-05-17T09:48:08 zHjGdSM166Pundru 1955541759 -576888498 false 295971482 -57687938020308596 \N \N 1.638916295711312E9 wl6xBbrKUA -628010450271221861.615328592 +-8996596168394339962 2024-02-15 2023-09-15T14:44 jNXvA3s0mY5553 -1506275655 -1231649814 \N -1642515861 \N 512557704409881590 2646.0042 7.01537915852314E8 NLSaPSnT2H \N +-8996915999126368252 2023-09-15 2023-09-10T06:57:11 4dyd -219514081 -1828143167 true 936532330 6827754438447153369 5839835958567922330 19127.57 2.134655125215748E9 UQGAhEZy87 845645761428381494.108020255 +-8997123210241609737 2024-04-28 2024-04-04T20:54:44 LhXmlfm1z 710310488 \N true -757959047 5375608181915685118 4196371862590311001 -18933.047 1.450606042349138E9 aa9NJ054JT 613220322883659916.171144546 +-8997144085801102874 2024-03-11 2024-03-16T02:57:19 LcuamPLdLDNYLD 444828052 -900290677 false -378584031 2723046500909423802 -199508065594829722 -14671.922 7.59526737152041E8 \N 949559459955211569.357149974 +-8997718169665753056 2023-10-31 2023-09-05T20:15:12 FSU -545520357 351799227 true 1639891491 6294614191835740232 5896548182775962629 -15222.579 1.51278032208953E8 Rc5pP5U9ZB \N +-8997901259905767970 2024-01-16 2023-12-07T21:40:45 FtikQ7YmJRwu12yLYOtI \N 1752134131 true 1285312284 -2204242916434223370 -5136443196759758776 30631.127 8.45893994458908E8 puu02uvOcR -195830088786543520.316671421 +-8998041677628549066 2024-02-16 2024-07-26T17:16:25 ZyJJfe5iPRy 379468263 1154648799 true 1351838505 -8488820518991291012 -6391998941115984884 -14966.723 1.89869657686528E9 BKUKad4vxL \N +-8999198191387213362 2023-09-26 2024-07-16T19:13:56 sX62n -68222352 752762427 true 1605622329 5917505385860022453 -6745928307803242160 -25890.791 5.70316454714567E8 CRGYcIfYrY 651859742908125971.518631570 +-8999239733355500195 2024-03-03 2024-04-30T06:03:34 1ZDBMMaVizUGu 1073909917 880566942 \N \N 1373267113169831348 -4323827376286642892 -16810.564 -3.02596703093454E8 tY8opWylDA -467152788012787291.958660905 +-8999461972857938640 2023-10-24 2024-05-04T04:01:31 0i0IJNHm8SiGlNILImTY -1865490275 -325116677 true -110281110 -8833915686015420032 -7280169674791769875 22153.121 -6.69752076525058E8 5tS7lAE6wd 685022539866188772.850181809 +-8999499166826565174 2023-12-03 2023-09-24T15:27:30 TBInkfkTWCzrf8wj0w -805766703 -2071482056 true -265324482 -7184357389319627416 -2490098259155759525 -25198.768 -1.377338505790474E9 HhmiU8r0nD -139498077918906890.999713432 +-8999724661979944566 2023-12-25 2024-07-07T19:47:33 JCWDusy8 309221137 1181879995 \N -1307183914 81616798833617247 -2149223951811277960 -25400.623 8.73658674290727E8 rVNTLjUu5M \N +-9000082942771660228 2024-07-29 2024-03-01T16:56:02 AVCBr2rvlAMgo -416718232 1720768142 false -1371087263 -294726150575077886 6293424187825506545 8724.407 \N pgQF26guMB \N +-9000159988856856668 2024-06-12 2024-02-28T20:10:24 5MULSXVuO -2116687597 -83369968 false \N -3276557388922427310 \N \N 
-2.056568565688535E9 xUh6KDZBJ5 -872015870067866006.825755352 +-9000176877086231700 2024-05-21 2024-05-28T22:39:40 TF6 -1763415858 717949784 true -1718811028 -8572572852955191572 -2932881975096652532 4155.2397 -1.677483061603912E9 qGTofPl5xM -320424605718292025.318301558 +-9000208978673141487 2023-12-20 2024-01-19T13:51:01 o \N 1849748225 false 654061319 -1276316518703932783 5620029798481311480 18601.236 8.827942479267E7 DhTnGVhz70 -903477237781493412.252332355 +-9000352574220187942 2023-09-10 2024-06-24T13:55:06 \N -842645789 1213439647 false 849870284 -1703723854120616961 -7796337153684483669 24610.773 8.21599053866149E8 lGNmBsvoy6 929479785431869268.784511404 +-9000355053568148287 2024-04-11 2024-01-03T03:16:26 n0tNVpJ7dgd 951933061 1934498539 true 959378927 87661101592257431 -7359869229365767525 -16226.456 7.28602487389106E8 9kTexIVGlH -819970862201507799.403595320 +-9000574300250539694 2024-03-03 2024-07-18T19:02:03 9V7o3kXnwQ7OFf -141406856 -1630268264 false -216420098 6624195352076961941 -2450120771691607790 2064.0393 8.20982727490433E8 vZDVFrnq8o 838815423779121489.980125419 +-9000825700850903927 2023-10-08 2024-03-10T21:34:39 GHjlbFQamaFF6bL 796879558 -1229812633 false -1463167692 -6104080936070599692 8171008251135581438 16529.406 -1.872758034057101E9 HGXrSeNRbE 237137261825385910.240601945 +-9001064847953647712 2024-04-23 2024-03-14T16:55:19 slMPA33kzOX 151481153 -324158704 false -123058217 -1276844264739903849 -2977583618866572639 -25315.668 -1.292861028534798E9 bcyT7h2CSg -184180730041109591.412416202 +-9001092794882799270 2023-09-13 2024-05-03T21:58:19 9hMnIBM6ah -17973714 -1469645126 false -1228012104 7431804269539387891 -6141953495247509792 12827.323 1.499902063142796E9 bRPDztNPxQ -975406913644135934.640469224 +-9001095519968628210 2024-07-26 2024-02-11T06:10:10 Xc7dHdr3zX6PVGF 422595743 \N true 1697889268 1400975202978208098 2111889361197629891 -23299.895 -1.772922193276331E9 ojXe7MbMCJ \N +-9001362905969229876 2024-07-31 2023-09-15T07:11:10 JvSo9IRk67U5 1589888941 1067967447 false -356145399 182120256352185885 -8934652494380093341 -3523.8809 -1.054398701913644E9 p1gTXeVyhd 429985079376508662.698430093 +-9002432141464977983 2023-09-17 2024-05-19T16:04:20 N -1024757105 452485480 true -838868870 4698487261029444802 -2015386427739172501 -10878.06 -1.672965195302645E9 vtclK21RJ6 671488551524428578.834228765 +-9002822050488406491 2024-05-13 2024-06-11T23:04:37 ZqfTJ1yJapCH40H3o 110979284 1009406600 false -1223142040 7533088582938139500 \N -13771.271 -3.8996366919508E7 R8uuy8yHT3 -581052797392120470.310771056 +-9003067892085261900 2023-09-24 2024-02-05T00:11:45 Y0gPZ1a2T27z7y7O 811569976 1926439633 false 1911742609 -5367070973080461205 -2861074000047942357 10940.752 -5.21356726515395E8 chjtfltKwE 351962251260081115.291250105 +-9003416179084113803 2024-08-05 2024-03-31T18:24:17 7533545 141856394 false -514252877 1503136695640542912 1882681426773606212 19482.41 -1.223530390826646E9 Wt22y65F1o -685133825381441942.769047706 +-9003603730618426357 2024-05-10 2023-09-07T03:24:51 LDma7XBxQmwlQ3NQT -1398872407 -267711961 true 1286589421 6088409874948194974 46627615855973528 \N 2.0246510150163E9 AuuEOLWIDK -28027350165801021.962312610 +-9003637491701875048 2024-04-12 2023-10-26T00:10:34 N9KHv0zrz1n6mU 731758126 -950440645 false 97424894 -6880733510407296079 3972399865749269847 16983.865 -9.28671807343089E8 1zevuiNH13 813597111870250087.829897333 +-9003771693833161608 2024-01-26 2023-11-02T08:08:15 Nd9iQjVRCoW -480944077 1852465681 true 49586666 2473426175805501480 -8247289238373192837 
-9466.693 1.806650511335656E9 YfP3EbxTcS 86514090543860615.926179110 +-9004697416171718501 2024-05-06 2023-09-05T15:59:08 Es6TC -867366806 -72119171 false -2023551629 -7209272288635071117 -4054529546964343048 -2097.9685 1.978181394359365E9 1YGkg9jaZb 581386045526679018.318510128 +-9004883411976104733 2024-07-25 2024-06-15T02:54:09 GHjUQQsRBYs2ZB 988099009 -2076133195 true 595058017 -55987877754897910 2217721775158487837 -29784.201 1.49049625262389E8 bAMyZx3EL0 145747487229771792.583675670 +-9005200504968500765 2024-06-20 2023-09-21T08:46:25 H77dAUiV2Jn7 -707539824 534832842 false 341797698 -5042372161692344052 7437803558274946065 5926.554 7.5094140435577E7 wJbP4cI9Bx -828200430927269776.744401479 +-9006554604664256354 2023-09-23 2023-09-28T02:29:41 -1007697150 1825111282 false 1852469654 -9168878686462911131 -226403964658396530 -9650.115 1.8499702974259E8 4lhwKvq6AX -69626546295264538.397900388 +-9006798632649333890 2024-07-17 2023-09-16T04:24:45 ZdGAy -1556776371 1923783862 false -720692031 -7431997540585632442 \N -17972.621 1.42919150725012E9 6HoSZljmk8 -540502668478807737.591463660 +-9006820624006881352 2024-05-02 2024-03-08T01:23:11 XUkgIVT -43368800 -688852484 true 1683663720 -4582429739643311269 447027775060472220 19806.021 1.855604148276435E9 bUyPIrOUjc -453744295770290112.405610276 +-9006830163483636240 2024-02-04 2024-05-23T10:50:51 PilzE 1455026754 -338818149 false -241964308 -444557782269216895 -5692654452145014668 31264.8 2.029073254897799E9 xz1UzEuxFD 944553493734194644.586621125 +-9006872898386903754 2024-06-07 2023-09-15T00:55:59 32zxp9dMw3Pl5 -604646486 1095865168 false 1538335824 \N -575508714032683396 21719.498 1.083802301848454E9 Mb9ZDr9Rjl 731841624216741723.871637493 +-9007080380236015723 2023-11-05 2024-03-30T02:33:16 EYi2LwcSvulgJHGY -798929513 -2062578096 false -50606560 -2112025082651995734 409989927651148904 -25184.715 7.93787262818162E8 CILg6C0Wx9 -744289658099494528.753650510 +-9007101162014885720 2024-05-21 2023-10-03T04:58:20 ls6DzDiyCm9dB 664179195 -120792820 true 860423343 6114590600376762733 -6016624279764387657 -17592.416 1.390258936174731E9 ps34MDY74U 823560245098926686.821552120 +-9007279593814506266 2023-10-04 2023-11-07T16:14:25 tp4dIxRbe82bsnBbE 1512586200 1558755638 false \N -7086324674946879032 5729877230615802098 -18291.0 1.151156644757679E9 eQipXrirru 527750345414011325.193122459 +-9008615881970439143 2024-01-30 2023-12-03T09:20:16 943dKr8qLk -1144074528 -463646102 false -191773227 2706771541876005067 -6389216902669791921 -20673.871 6.45235123949749E8 \N \N +-9008713900106160182 2024-05-22 2024-06-15T22:48:39 GWjGkg3kYzV -977205919 583903848 true -1511012435 -4955294140230217643 -5711256071175869190 -11607.778 -1.145224308433659E9 xE12AjCNBg 152440810345956669.652358998 +-9009126785111071588 2024-07-16 2024-02-25T07:30:16 QX7Yly5WgC7ChDp -1020655197 -743623341 false 81097366 6395854103968196519 \N -5440.277 -1.670788595375061E9 \N -877421707827296088.348923850 +-9009128536440597892 2024-04-27 2023-12-10T16:29:25 PrIRRvEktxmj -677557891 1576584324 true -571351746 -2454041904536207520 2848160142206780926 -20064.857 -4.53826626246843E8 tYrx7QRG9m -703703818120520194.576834370 +-9009175133395519276 2024-01-30 2024-06-06T22:24:51 AcBiSpFDwlD017fp 343073286 -891508133 false 157558532 -8242834064079599835 3216941948312455088 -16569.178 1.59969176917206E9 52vuZnsIYk 679348165733153941.420817547 +-9009242164030364741 2024-04-14 2024-03-29T14:21:35 LoAJIMmLL -1780962639 371008715 true 1963721362 5674558646102847149 -5883182601484947977 -23995.098 
1.937043680508618E9 4sIqTvPt0n -752434091224892226.902110173 +-9009250458956005556 2023-09-23 2023-10-18T12:42:16 gi1CG8 -1326797827 907717332 true -1831501287 319928254146770358 7337894556957896519 -28036.09 -1.870762020606629E9 CCuyc05kZh 241885266874475984.311976800 +-9009417566918635423 2024-07-24 2024-05-01T19:28:47 ngIy6Fct04itGKqX 1564607419 366989140 true 221634287 \N 4158066097036243137 22855.271 -1.117973742570017E9 hegAbsQR6a 322613955832282242.911360460 +-9009479919170886122 2024-01-16 2024-08-09T15:40:52 6AB 525065041 -619253216 false -1598877545 8338389530478259733 -5371776455727430263 18183.96 -9.5002157171402E7 RmrTCu2RSO 543805033598398027.951007848 +-9010531439098919058 2024-07-07 2023-11-18T11:48:41 bROfqFXUNzjQcR 1742990877 -884627435 false -483619076 642001069952670871 3706644263648420256 -5426.05 -5.9447016200545E8 i4t2R3RULX \N +-9010886327720405298 2023-10-28 2023-11-02T00:10:16 AS9sM7 178575909 -577340073 true 1841772807 4859230222948199509 8601194695470330986 -5274.6714 6.44327407950247E8 X6jMrI4JfC 135978064023155719.569290387 +-9011053530963235042 2024-06-18 2023-09-28T00:19:22 vn71V7A13gUGBT6O7VMJ -83862849 -505187767 true 1878962520 2543471706489097766 -7573855285278360850 16640.438 -1.802674846322226E9 TL5WLQzT4H -707610870906548255.592565590 +-9011096488665537093 2024-06-03 2023-12-02T05:13:44 03klJm6i -815027861 430469146 false 102414545 8971782594595266768 6389624797487750557 -21166.238 -1.803248528613213E9 g3QUrFGLRH 750349266411405987.490734977 +-9011668381518080411 2023-11-01 2024-05-20T15:22:10 jQ1hshMhcCvpJyhHqbJ -1310811552 1054949789 true 75361779 -732191163591300737 \N 19454.613 5.90002492882037E8 A1kDDC4s16 -157907663838252776.964618167 +-9011850788082920721 2024-04-15 2024-03-02T03:28:58 V1K 461142310 1916267107 false -1121362965 2966535354618400681 -9137025828343584938 7876.9595 -1.636314834714288E9 q2Aj9pQoE6 43481119060907229.916306314 +-9011956373067268327 2024-02-21 2024-01-13T03:28:29 nt7wRXbZZxfNX -853289782 862171189 false 1787339231 -3987336678986562009 -2367912985251212549 -5238.029 1.326143423164917E9 i8PiF7BGY3 -495478678812053057.790456611 +-9012100105085298418 2024-07-23 2024-07-10T04:16:07 srbS -1348622513 1110431651 true -258560001 -7108061424826009383 8032569743779432424 15829.911 6.12006620391594E8 tmNW9RAUsy -353769774749027019.826641954 +-9012304551671773681 2023-11-16 2024-03-24T20:10:24 1 -1160358353 729221283 false -1880240455 -7696906999892713244 -4586719009458457183 -702.01917 -2.8249891329007E7 b0gUrTmyP4 515622250771875811.261298823 +-9012825731271411994 2024-05-12 2024-06-10T07:09:12 ajLAh0pIdWbB \N -1602294094 false -1597403907 5307649291564861829 -2286519166063733683 7058.585 -1.962155990020306E9 JIYaOJXRGP -175906817331059743.291384369 +-9013703569914410269 2024-03-27 2024-02-10T14:32:42 PYTl \N 801215720 false 7488299 -7010298901173561256 6501015876210690734 -22327.133 -2.048594062566325E9 EKR2FXIGck 223347160875748925.713981827 +-9013728466238012884 2024-02-02 2023-10-23T05:43:38 20ISBS2L4Nrherh2nXE -512216267 -1895409238 false -181240478 615572521157605291 -4906104122557254611 23543.36 \N vfQxtoxlnX 624452847232104270.130434713 +-9013890007950228984 2024-07-24 2024-04-09T15:51:28 VSkD43IVv 1071075789 -549536912 false 1153203721 1038898903912163165 -1497581149464293321 11852.917 1.73731808869312E9 PLqM0KFwy1 -135492742889943218.807851629 +-9013991484388733205 2024-07-22 2024-03-11T06:26:49 NdPDyOGW 947807371 -358473398 true 7453671 -5395415981010030581 -259727156551548425 -1653.5094 3.31957684522365E8 heR0BNYaBw 
507572046382486923.445582866 +-9014073539809220780 2023-12-05 2024-05-25T05:09:20 ffKnd -1204722623 -1843609925 \N -1424954025 8363253713802283079 -1037768317729944330 13547.0 \N HgyyIz6UT0 206006162526574391.633606800 +-9014373245399755110 2024-08-15 2024-03-15T04:58:25 UgbW41 -514330159 1518105992 true -1445734777 -8175547169924917635 1382478123930509875 -28973.242 2.008258994876718E9 M0BqCTHCuh -810996628180915974.517241200 +-9014585020082092191 2024-05-03 2024-07-27T10:49:08 QA1 357339688 -284041410 true -702933394 -1849326204697971416 -3085332999692797369 -9885.544 1.66938420368838E9 g8VC65iEOb 110994925750874592.707066844 +-9014761678782832145 2024-04-30 2024-08-30T10:19:56 A5GdUapL -1247911580 -1678357812 true -1291229659 1422963675804074423 -8236529271454328710 -12964.735 \N lrluQHyQHh 249598938761705303.602464142 +-9015066301458746679 2024-02-28 2024-02-16T14:09:20 R \N 1031353425 true -923414285 -4382631234600856641 -8942383255863406619 \N 7.27370074297134E8 ZnmBmugVZe 177321003862194772.338104012 +-9015558782703157247 2023-12-22 2024-03-01T12:35:01 fZW3h -1913308038 -2080682887 false 455535796 7809132333778196631 8948419271347899822 -13918.073 \N 0PCx4NXUCA 627431916029955043.665258723 +-9016187294157400163 2024-05-16 2023-11-13T05:24:30 81C2ZhjycwP6a09iiwN6 -119407152 1268853671 true 1500224625 2676205951113940664 -79375104852657856 27217.602 -8.191650171427E7 jkQ9kZ2kaz -237724322480741467.152938487 +-9016287436618601038 2023-10-06 2023-09-08T03:31:39 AUiqnaj2VnMtmywIaR 2103490569 \N \N -1360848957 1645827491846087022 -5409306364463625465 4291.9014 1.807851958239868E9 O0zTY8MyrI 901696700127387282.700050937 +-9016809257649414434 2023-11-14 2024-04-12T01:46:41 BN5PyoBlpMEy 1133116922 532814847 true 1259712357 881570549578773181 -6380929044298304588 -16611.506 -2.087777962083593E9 uVaZehTIGB 116561821526168816.298908080 +-9016961275773294821 2024-06-02 2023-12-06T04:15:26 -2109456995 -1518309108 \N 1624305363 4754285097199879823 -7757139320123094535 29398.621 1.124821848725874E9 kYmPnhD7l1 761300482541224488.312150838 +-9018347144022284385 2023-11-22 2024-04-11T22:38:30 ZRtod3B -600233135 75236889 true -1978164849 -65854042486787279 -467285010561758058 18590.475 -7.58198603287465E8 StCyeVBy23 357970644737033753.727278859 +-9018548166691412383 2024-04-24 2024-01-08T14:14:01 R6fq5nnuN 1491519333 330226864 true 363501364 -1016079271961172678 -5461577538924287792 25112.428 \N LWhuOwmvRJ \N +-9019036521679553505 2024-05-10 2023-09-19T04:07:46 kRXAa3 316592050 1855968182 false -1723311669 \N 2057673256008257022 -14393.939 1.138012885443676E9 xpmhQDEFZW -349933713016554206.582940020 +-9019055729739559512 2024-01-02 2024-03-27T11:12:52 XvLFunDjWiL4FqqFDIEe -1852010531 1664781174 true -1193556107 6852696870243382157 8417167170596904521 -30130.088 -1.897231231826223E9 eSzCMJSQWw 966841204376018329.933596465 +-9019076784740451194 2024-01-09 2023-12-23T13:36:39 5VjX9WdF8AD -1080366483 -1780774530 true 282198477 -7243081080065548988 5038536586376402780 6858.81 1.474049805927416E9 7LWMhOmZhM -94715266936554116.274799589 +-9019568685450499046 2024-01-13 2024-07-30T15:26:25 bKBLcEQpA3bl 2001402284 2099180003 true -913475944 3100912198688971803 \N -5892.234 -6.37753857675919E8 BlCxTf9tTf -742315170978175786.821544905 +-9019765052356291891 2023-12-18 2023-11-13T09:59:55 ZMM 1357264780 394667516 true 416549832 \N -8106427583087021177 20846.432 1.648156226015761E9 44jT7Z1hfO -557005267438240368.214398417 +-9020114863246542547 2024-02-21 2024-05-30T02:37:45 \N 2110849972 613873494 false 242422710 
[diff hunk continues with bulk machine-generated test-data rows, one `+`-prefixed row per line; each row apparently holds a bigint key, a date, a datetime, an optional varchar, two ints, a boolean, an int, two bigints, a float, a double, a varchar, and a decimal, with \N marking NULLs; the remaining rows are omitted here]
-889900479685319246 4189333996744432191 -26880.758 -1.097954186017307E9 lCAKNbJUUk -78936671623803268.989057286 +-9124612487244817829 2023-09-20 2024-07-23T21:50:40 1s69hb7 -898342816 130495756 true -2094098904 -8744259124999938189 -2127135330286972073 \N 1.34043112132669E8 H3hMNMrqbQ -646780181447945651.770382928 +-9124700122508431883 2023-11-28 2023-10-28T09:53:43 mx0GaC1Qe5 -1438969397 1793459338 false \N -6073611721879512244 -4623286759560243024 22696.838 -4.02059917516718E8 IKO4bm9Xqm -356815205397708584.617184128 +-9124703394893349000 2023-09-17 2024-02-22T15:32:36 7kL -1245837196 1805683278 false -1049207284 -3297909919184570556 -4822724463925651378 -25808.447 5.96989046598004E8 9G9BEXuV5R \N +-9124776125680451798 2024-06-13 2023-12-12T14:09:37 NjPyauG44cLy19mnNirC -1513310468 -632741877 \N 271528047 6940686977461603240 \N -5335.5244 -8.66695161413277E8 H0uq9Gbkqk 528053646058095687.717706481 +-9124896543267244972 2024-08-11 2024-02-18T17:27:14 \N -1015793526 -1716972734 false -1097413989 3264595230418568009 5333665472197079067 -23851.062 3.17595429717224E8 PrmMn7w10X 583909392039017448.490468437 +-9125603845190878036 2023-10-02 2024-04-03T02:01:37 pj -1315195886 989455149 false 899251469 691276160299931874 7682005843853609372 -17714.033 -1.045413865110766E9 JURM7BdL3J 527667308972572490.151273667 +-9125692248793397773 2024-01-24 2024-06-04T15:52:34 eGha5OhRafiPuv0 1459424644 \N false -1332128290 -8993656108260467479 -4703855582174247854 \N -8.82453319765526E8 nCCDr3if0l 222500132093211410.782729475 +-9125893086427325314 2023-10-03 2023-09-07T02:38:29 AsJQgpQhDnceF5T03j 1340405704 869571485 false \N -7960900415482932138 -8665271232692554242 -13504.251 -2.019732255816797E9 grvWh9jshn 285808317549418439.397513366 +-9126243593690335375 2024-06-29 2024-04-07T17:28:15 -1710431481 994738448 false 622262205 2380036773315609253 8876959258105761287 -10065.896 1.488088674734833E9 ieAHYnNfg4 840850636485957651.947399949 +-9126358563974769279 2024-03-12 2024-03-28T06:22:54 3fX3fbp1vy1IN8pfbR4 1013141613 795893131 true -1146477447 -8710739826692561816 -8522209209171590826 4992.332 1.640157552703031E9 DWN3MevWQZ 115638824240153132.718026109 +-9126489289894567672 2024-01-29 2024-05-05T06:30:49 -255508532 -2127482364 true 1032543154 1338629700742584390 -4544666837355654245 -21924.943 -6.18446175682744E8 9BfnsZYZWr -836719483907158805.273961704 +-9126634385600818406 2023-09-01 2024-04-12T21:52:22 VBpee4YiVZ7bmKueq -1869779406 734644987 true \N 3495077042836696487 4178316943179125880 -29080.156 -1.328442202276551E9 SONXdH9qgk -264228199498914427.150748700 +-9126768625525172346 2023-11-25 2024-05-26T16:09:19 BMG7bjV7I -164700661 -691231782 false -296910215 869936346988289190 -5153597178358384086 -32479.125 1.202858906296794E9 9saTAxDHG4 -526064475185175176.433483403 +-9126886120528590585 2024-08-02 2023-12-30T06:52:07 BBokNNOX3DCZ -1677190112 -913496866 true \N \N 2038711733196087324 29083.92 -1.45669685237782E9 WTsGjl239v \N +-9127200037705875316 2024-05-24 2023-11-15T12:18:29 Jh9NrrwiQNw30quWUVq -202923760 -521837588 true 1230997536 -8489823971771339153 -7081586355728530706 101.64552 -2.3429451590598E7 lR87bUfy9D 435817801099033018.667383843 +-9127588803574115065 2024-06-23 2023-11-16T04:11:12 n7ZnPLXCxXYs0CWDNZw 1812133850 1999106045 false 1656038996 -4226273296375247815 -3651118714164138279 -825.6992 1.645119615646853E9 Gk6jr2uCY3 472463048784001676.739702163 +-9127890399652716007 2024-05-01 2024-02-22T00:40:50 -1849479916 1144794084 false 1862643997 1855497679238627193 3769918615884070748 
-7716.7817 2.80291146579572E8 qvwfWJCWDu -254045324563242606.402661250 +-9128310159219607489 2023-11-14 2024-06-30T17:57:40 bow5yzng9yoFQlw -1513816558 -532830872 false -1562667626 -2122884284277808698 2212983753773954534 -18842.498 -6.55338507911013E8 0RR4NlF90E -537673775911520503.811575915 +-9128345351105769191 2023-12-26 2024-01-19T01:45:11 Wsw1F4qq -1385108467 -344790648 true -529072816 1403062333680098957 5605056905253058100 24033.158 -8.34079286058465E8 p7USCCvIyC 438229197854038478.977846151 +-9129000153645600183 2024-02-28 2023-12-10T20:12:27 gt 512928890 -1086167226 \N 1531661494 8557967919516629731 -3208660686991607506 11202.906 \N lVK22ncC8G \N +-9129399108249056554 2024-06-30 2023-10-07T14:25:12 1378049709 251960014 false -1625261848 8399800248143421465 -5741495883373461442 -26845.066 1.88687445823503E8 mBtDkJSpHZ -26980394024499832.439252255 +-9129436217721742372 2023-12-24 2024-05-28T20:52:08 J74IjyJecowCDOcn9 -1891623137 -1082677732 true -2022225192 8433518975199011703 5027490904646921802 25851.996 2.83717700998213E8 S44MS6tWzf -485459976776159379.473978336 +-9129671474482757835 2024-02-10 2023-09-04T01:38:42 \N \N \N false -532358396 -9075517321257135710 -8647409634560067266 -19180.611 -2.90446321112625E8 \N 649293142337344548.660531245 +-9130131201341436240 2024-08-10 2023-12-23T22:31:20 Vf8YoPrBkGhL1RHcnNVB -523583650 65900621 \N \N -2173209080087954894 -6875567506311831523 -22594.912 -1.478551452208022E9 2xeGB62kzS -261340857884871140.136452622 +-9130287077948588437 2023-09-15 2024-06-03T08:27:24 qgF0BbkrCfibJxud -581311832 -173804519 false 554687665 6148203616100941942 \N -8505.316 7.0518223729915E8 yYskacLdFd -797006687478846887.770773735 +-9130299552649073752 2024-03-04 2024-03-12T05:40:16 sbJMbRrEimKkPfLHkzG -2087640039 -868816854 false 1788208529 -1632701369778901877 1329145866253954304 24685.836 1.707096724470201E9 JpSeodiLdT -654578436639306350.603766264 +-9130342905516347739 2024-05-07 2024-02-07T00:19:40 OQ0KK34OyjX8U6tVvlR 919307927 -399638204 \N -1926297529 8723981155669879929 1472263204377280011 23038.1 1.986494371300038E9 OnZS14W3lt 820348738369224089.481400663 +-9130609044154872808 2024-01-16 2023-09-18T17:46:40 rccKFYG9k08y6Uf 648802537 -1729573696 false 1814584743 5695469820056053435 -661549469019065179 -29323.703 -1.727566949662153E9 RfzuMRZ2Bp -240715275151316432.282566091 +-9130880437400886529 2024-01-07 2023-10-02T02:07:50 3mQXhC4X 471617953 -1529110415 false -159535882 \N -7838375693709729126 4561.503 -1.669230017707436E9 nN31sUeazI 181511971767508913.117498029 +-9131226516262025572 2023-11-12 2024-05-29T14:34:52 KbQsEtWrS 381535529 -1276423434 false -166128669 8719655889052336997 1901867936711053921 -24037.71 4.81921376001442E8 ID6bgxN2kz -15765970327168736.736103830 +-9131325910373045757 2024-04-05 2023-12-11T18:15:18 nUx6CP 357096791 1376947108 false \N -8134560454415609049 5087603542484387191 20712.45 -1.46110958614021E9 9xHwvCYP7m -822454511354644105.960201307 +-9131697957755838581 2023-11-30 2024-04-02T01:51:02 Tq1i 451178567 232239783 false -1944819244 -5888296479734174194 -8801971254777964400 -17635.72 -4.84990484884165E8 svu4sNUiYB 386701372710864642.333936644 +-9131876685444143355 2023-09-15 2023-12-09T16:03:27 UUGLGE7tl -1739001459 116569796 true 661419513 679788136149036133 8576983224319856242 14790.637 \N ArV9CrfPH5 -763887236814206622.927576883 +-9131968785443470630 2023-12-01 2024-07-19T17:59:27 V4QfldjzeZ8PlnWjvUpE 16483204 -511795507 false -13380765 -4627102480357041746 238564613538703573 -15923.141 -3.22891496326491E8 0BKhl2nCrH 
-817078596891292067.625987659 +-9132086608532375399 2023-10-13 2024-07-26T12:35:03 kZQg4jbHGx1XhBqIRrU 93563792 142557801 true -1663305765 -5064780915847306125 -1680640388520788578 -14677.545 -1.389443319030319E9 ZoDE4KVSb1 367012209790430426.856481651 +-9132156146303419513 2024-03-31 2023-10-10T13:49:43 b -2106062405 1437211296 false -1727262650 -8336828180934988230 -5792670919881684241 -32750.309 1.197294650302519E9 7VGr088oL6 -765185625036364994.275916479 +-9132444163235268026 2024-04-17 2024-08-17T07:32:23 \N 1908627339 1643350123 true -1623209843 11904668571398445 9075743604533199642 28618.232 -9.34600150912434E8 PWXPyZM64V -975063327293771382.674080872 +-9132620386328578708 2023-09-28 2023-11-18T23:09:41 nUXTGEWQyy4A67 -1419340552 -35248641 false 1358188883 -3303484547565901127 914844852718077768 -26949.617 -7.48522461140488E8 iPcX7lZdlt -517219872918518702.786938463 +-9132636078843980606 2024-08-22 2023-10-25T06:51:54 X5vvYQsCKDExL8XjDe1 -1418442686 -1697148642 true -591161676 -6443716291409012576 3343396000778834360 -8486.129 4.22033541356151E8 5md7pgLJk1 322230529427311692.729248764 +-9132898233876608479 2024-04-26 2024-08-16T17:59:12 RgV4qxIv0 -145386099 -1217062619 true -1907221679 2062560370488517849 2836279077623394889 29324.145 -1.582578791266385E9 7wsTyJB284 -297129431478763465.286539188 +-9133175366132361100 2024-01-08 2024-03-15T01:25:23 JPGTTa6usvavhiXw -273239709 -1086169359 false -803007227 4599978682347693860 \N 30983.525 1.635345078566954E9 SpooCtGJw0 66593387387664200.643696093 +-9133457022464166508 2024-01-24 2024-02-22T17:28:09 \N 2053265561 1353065011 true 1048955393 \N -8387981787031242529 -7394.4526 4.3805846359682E8 nLDtshbND4 -199949933225124253.969483774 +-9133570773788617419 2024-05-02 2024-06-08T17:16:47 0z3YVwgRgDtbfP5ga0k 1968555180 941661883 true -2082871235 895248133192256511 9125315751739741778 15843.473 -2.119122894629265E9 5JsmcDjWhO -320914283068323138.224548620 +-9133977009652020808 2024-06-19 2024-07-21T16:25:49 ZCGU 1167147298 673402503 false -1897498461 868431306876471749 -9117863466532804298 -7856.843 8.08438120117689E8 WaQe4jCcou -812729938563937860.772695168 +-9134593776424653740 2024-07-17 2024-03-27T10:43:18 JXnAmFRvF0GA3p4 877971431 1970349169 false \N -4789068538364440788 1880358453467500727 9010.837 2.082560022731881E9 VmQ60w9dh8 -549616873856999813.193272927 +-9134738616465194654 2023-12-30 2024-08-09T18:33:08 3h5GCveCHjmy06n -1876396970 -1349134699 false -2065408425 -1452343629650276254 8831368993960577891 24559.29 \N sX11G4uu1I -83237013144983546.674382861 +-9135062250297272395 2023-11-13 2023-11-13T04:51:40 o3VJ -1511229344 -698527587 false -1409763178 4624492299150437842 1234263419833044889 -9764.636 1.18411236685078E8 gF7bj9yMq8 769948016557577201.735607639 +-9135180146226956168 2024-04-08 2024-06-22T23:37:59 IxnuUmvlrTNqtL5jejB \N -100129332 true 1761956649 -1927171228296869540 -8617977117199508620 10589.971 1.519674463157569E9 QsRVW9LWps 398683852634952904.839140097 +-9135446276657401672 2024-06-08 2024-07-10T06:32:54 aQcmA3pRhFSmVY \N 1802508728 true \N 5143146975111193910 6797153380810074825 8541.639 1.412348030661856E9 2rWYxwtDr1 406848222396850223.187017334 +-9135806057965097043 2024-07-27 2024-08-01T03:40:28 kFlM -464654920 1246375711 false 890818359 -5493318122966141782 -4042675306676284525 29317.303 4.9952540161762E7 hMhnrEd0u5 77979333647962484.222642941 +-9136648983042875268 2024-01-31 2023-10-25T04:26:30 0 -1273593172 -1163830240 true -1462515904 1983326336045947352 711442571219304097 -14981.516 2.12175257659026E9 
2xyIrmfpyi -750004646862553273.689916045 +-9136747661695890674 2023-12-31 2024-02-07T17:08:42 3 1991875701 -447311697 true -1693299494 -447668861952833311 5761240517653741535 10893.415 5.59964705664559E8 NmpGglQPI7 -20433200031539101.611910456 +-9137128641964473539 2024-08-23 2024-07-06T18:40:28 PGJ 1202176575 -1746989190 false 1639389035 1861655781025485789 6574421143581186652 -31541.723 5.55535481147603E8 e4XxsJ0AyP 796122623870373624.634318610 +-9137191092426992821 2024-08-21 2024-08-11T19:28:29 d8HdiMy58p -1746260647 226870088 false 1095789988 1256502291010236376 -8537561861385306210 -22612.312 -1.497982100350297E9 brYVTCsolQ 998941645575681286.729058711 +-9137577566714328984 2023-10-14 2023-10-29T00:28:11 p0gvW27DrzSIqKqruap 648908697 2048349932 false -1770363732 6441548814930295033 -3615931889564031677 -20820.062 1.26644645685269E8 OYLbwOsvo5 -862623822609559414.236180113 +-9137697020274218506 2024-02-09 2024-01-12T20:35:38 1ZUWLSN3aIIT28PA7zO -1598562790 -1434760226 true -2010244397 -1141668878070557930 -6489040617735960467 9596.306 1.56390120361237E8 YyPOP47GCJ -448958685862489139.474809293 +-9137714746762568098 2023-09-05 2024-04-05T02:44:32 ndNM6iMqdNg2J -41213412 -2026131689 true -2117468692 4435368519061222764 -3907239008420380950 -13682.639 7.46111015344548E8 rdQxJPBruW 884396554487607622.456623740 +-9138041401250734403 2024-04-08 2023-09-26T18:06:49 6iRf5C42yayc64OiiC9H -418862159 -1617234376 false -1681060919 -8544764105298332255 3424095417493493861 -26400.705 -1.155173912469368E9 \N 734433805329474722.697744060 +-9138291568507605620 2024-03-27 2024-03-22T04:37:36 Z1YWz -1338770718 -1973970069 true -708262053 -735408657014040051 -7124332110908120557 -26884.697 -3.18415642120758E8 XxIwZEuX1d -94036927576427341.967422923 +-9138552006744135327 2024-07-20 2023-10-21T05:05:22 ZaNddW0xx8j4 772548907 644972686 false -1084605188 6814791844794942044 981422248836662454 -28821.865 1.880151310196803E9 Nsm3wiSsrD -364107641010157212.503939017 +-9138761213063720760 2024-03-20 2024-02-23T21:14:30 9sslxjw1wY9JCoOr 1586898685 475188793 true 440338377 \N 6870591579888864339 -18271.92 -1.791744712114061E9 BqAPnNNLrV -6160858435725915.596589736 +-9138787957513460261 2024-06-12 2023-10-21T09:51:50 WskAo -1402543736 737479409 false 26772094 -7857364415040112631 -73852230514718298 23349.201 4.84522944415325E8 Y7YiAcF8yf -317615851903037194.283869241 +-9139028549724996573 2024-06-12 2024-03-15T09:11:54 \N 1389158115 1546149073 false -963251827 3840234820242608151 -4836986771074560196 -4233.672 1.633842950759301E9 VIDPvIWWfT 854951821864254257.718716800 +-9139500260249525868 2023-11-14 2023-11-19T23:31:35 cGyXsb3X -1458386200 1633380203 false 871477964 -3741278391878000232 -8560085094851526009 8180.6704 -5.14291598023914E8 NYEaOUlWzg 831345824491187409.266232526 +-9140354325235001257 2023-11-20 2023-10-29T04:17:11 rhDMHirThGPjzj 1624722258 1700661291 false -217377192 1557751003779525330 8580097683144006073 24528.842 8.71252688299747E8 ww1acr5fPB 457370288259214220.357903394 +-9141156147363082082 2023-12-20 2024-01-31T20:48:45 wMMqmS7B -310825365 5098151 false 1433771758 5223821528552219073 -803523014745753994 18911.273 -1.922876399205592E9 UwXMKXT02a 406729790858643879.521557331 +-9141162001080337603 2024-02-06 2024-06-24T15:15:07 sB2hUKJPOcl -474154864 1401730122 true -157747955 -713005572163264271 5412246006179878015 -5272.3184 1.54561181833177E9 drdOFVgHYU -781453628141657521.918391750 +-9141355754985642518 2024-01-25 2024-08-28T16:27:39 qL3ihuUxPIA -1202655680 542409576 true -1963468588 
-1471652337974188044 -8990339892358139052 -10112.394 -1.59409619354676E8 deNWKAyvC4 -270754245691813701.340170420 +-9141428545647256349 2024-03-24 2024-08-15T18:57:05 g -994911986 1371337631 true -1259437657 -3814379584191711490 -3695531696418720667 6955.814 -6.15573398601871E8 pZ5v01LB2Q 86364673335327096.253284253 +-9141463980314095254 2023-09-05 2023-12-20T03:25:13 gxikNC 766349224 960808124 \N -1620994685 -785736349080327679 -1962508083876522796 9533.957 -5.48591731954805E8 S08gLDMCK9 461593049563923762.231027797 +-9141656123043359223 2023-10-16 2023-10-26T02:11:34 FE88MqiaiW0 -888710481 871663187 true -1203889825 8213753051202947831 1659301648453262253 -26826.812 1.155918349965722E9 I0HcEnb3NH -970298841554789097.333373875 +-9141992387932515271 2024-06-13 2023-12-31T12:43:43 LICsN 443816951 740690970 true 482953202 -8840483393203914418 932133640344047822 3310.4087 -7.90316006982968E8 VAwNgSrsAM 80301902872322519.755980462 +-9142394803889844508 2024-03-15 2024-07-16T18:11:58 PJ0kueV 1214676787 413234662 false -1373924137 6840398879486875041 -7188631786598721804 -16826.988 -8.8374418955324E7 B48fRTwiEk 558679226179693677.762421306 +-9142925312969647463 2024-02-22 2024-08-28T03:44:53 RvSbnnh -47502619 389279949 false 308096089 -2063795838550901484 4754450667922195537 17073.137 7.37097921973917E8 L7ftaRx1q4 -892028914534126249.586268003 +-9143003438216375162 2023-12-12 2023-12-26T04:46:03 ZLD6VYIZFkrciX0 -2027857016 -1977190387 false 583732619 848736968062490569 -3372284994147784580 \N -1.774509635507458E9 6Spvr6x18e 763095490186412815.472349887 +-9143088501200826214 2023-11-09 2024-08-28T08:58:52 YbllrF2D8fglE5N8t -1231021268 -787615065 \N -1419691082 -4339143219472469621 2170711768090366462 -9468.277 1.604399326787887E9 70PKVBJ8po -821972481077289685.285809289 +-9143524869441408200 2024-06-12 2024-07-11T20:12:19 sh 1210962197 316873608 true 1193475286 -857655435937398769 -6540003749460725241 -12315.107 1.482392609554851E9 m7dKJSrxUv -328701669786717596.623963062 +-9143545719852932252 2024-08-12 2024-03-20T07:21:03 s -1444716680 -1367672082 false -2076870099 487869690509332602 -2497669242885297109 -18220.936 -1.173522947803429E9 cneYc2SaEs 558776580743940462.906906927 +-9143660326729335131 2024-04-18 2023-09-21T01:05:45 IbfaBbil78mJ 768907957 -716706299 false -46605800 -4950261122309621969 -3968965351353006512 31444.791 \N wXgF71aM2Z -666334726524499895.769777948 +-9144371663031597620 2023-09-02 2024-03-29T18:10:01 1823810252 493050447 true 1232259577 6618721736798433892 8981917805892058609 \N 5.69453843887658E8 jnpv3I5Di5 -301768632257234033.381838819 +-9144518883495293817 2024-08-20 2023-09-21T09:39:47 2yEMFm -1383878392 1035694950 true -1483700407 -573227315044185069 -5699782240002247649 15802.699 1.249078208024545E9 KS4YeTQJex -339783512822360275.142247248 +-9144700943873132792 2024-08-27 2024-04-10T16:53:51 -436558407 1474502468 false -1141001057 7337108048267053443 1172502075208846028 22392.08 \N TnZ2BtglRD \N +-9144766247784993490 2024-02-21 2023-09-18T15:58:51 5 -1194494780 1142566660 true -1730490430 1756336794083314620 -3370897968992267392 -7425.3525 1.881354407079478E9 qb5b3McJXA -537531470727364041.668423786 +-9145187360727679900 2024-03-31 2024-03-14T22:00:33 IzHPDPNN8 413208293 973993277 true 33081017 1060884074486957477 3061028977130932775 -4294.5073 1.190613572758832E9 lDFkq25TGS 791640326393984858.432876610 +-9145282030016867888 2024-06-05 2024-07-17T03:55:34 0FFo2HwDRL4nie -1436995659 -190541098 true 368979493 -3932826005613000165 -5194867223452225186 9837.171 
6.95069366028121E8 QMmUCJKLb2 -858012403235946875.704635149 +-9145993408915638190 2023-11-28 2023-10-14T16:18:48 vypub -2055988157 -307234665 false -1920501872 -2942404852216228116 -2182904527708270559 \N 1.911380335335924E9 9gM4TsPFxI -98154096298368794.483859216 +-9146254856949998248 2023-12-29 2024-01-10T10:24:42 OxKylppa2iA 1126346057 485421759 true -1209201778 -2134654755326517690 2885781961087428085 -22027.768 2.170483809848E8 \N 468200121195625631.494962545 +-9146436426853099115 2024-02-08 2024-04-18T03:54:24 ta3nN 767027327 -1378454680 true 1810294345 -903273790471155134 \N -27116.734 -5.58082492303848E8 aJ7z2pnbT3 155083896088222525.548636866 +-9146440369896871358 2023-12-23 2023-09-15T10:15:08 ALmEr95NJeKKZU9GTeu 2091009633 1474532720 false 1300450417 4536784885063272325 5284107710272155962 7935.637 1.33122802202852E8 RtYyZXYwNi -97970738801463608.988779774 +-9146599997522014384 2024-07-27 2023-12-19T06:02:24 gqmas3nnNd1IHP -17644959 -489920152 false -104548711 3370691154097148987 -1271373064790854944 -7026.0425 \N uzaBdtWa3E 815075040105326874.393457575 +-9147094690453408240 2024-02-08 2023-12-18T08:11:52 x5ULuk 401476886 -863893236 true 912882821 \N -7808939621963366261 12939.453 -1.532936807214646E9 jw1JmiApRR -939020588636634874.987962478 +-9147543884194430382 2024-04-15 2023-09-15T17:29:21 krLq 1657668188 1798014671 false -2026945306 6411228762176355190 496601282478399218 25961.535 1.199960346192629E9 YRlMdcDmgC -497738291483097778.453451642 +-9147580010317846649 2023-11-28 2023-10-09T01:53:34 MrHOB6 -357077998 -1603043276 false -784733781 6482959609756518949 -4926556548092720885 -9991.357 -1.84500262662033E8 2nmy0zQRUA 310976130998211487.234811529 +-9148180627180433849 2024-08-01 2024-08-30T00:06:21 bSYOSXBqnpGtxJs0 -665408059 -1181048471 true -421680274 -3650227093054633518 6863652190014347162 -10672.09 1.451399034494627E9 Nm3XHEzl21 -663476948490436292.332248608 +-9148226218997807295 2024-03-23 2024-03-05T10:20:56 PL -881230186 -1792363038 true -191051368 7326445214993401174 3467530801127245899 5685.085 \N OBvlxT8EkW -426037587959716403.993928633 +-9148670166628453063 2023-09-16 2024-05-20T03:11:58 -1956011897 1233699009 \N -878862175 7847285129682338486 -4158066812556202684 4737.2256 3.52623179367927E8 V1oEFrw4Nj 803963357045013090.239340683 +-9149371158515095191 2024-01-04 2024-05-05T00:02:20 2pw6c2Q4zEG9WjlpCv -1638442269 1417216166 false 702775356 -262486244695157251 \N -31278.027 -1.257389496371118E9 FfnF03JYQB 772349256499899418.227316520 +-9149446697261075668 2024-01-24 2024-03-07T10:29:38 ablbj2QUZAnQ3D 149529412 2112848878 false 2143397283 -5361601622464656445 -8172099414799865515 -21718.744 -8.33085484162264E8 6mqMpEhwQy \N +-9149491508696561525 2024-03-10 2024-01-31T02:05:10 nykatlvwKMQ -1986996625 -1368474614 \N 1344431509 -2514797054965988808 -9183851707576174395 2346.1904 -3.04524030366302E8 iCHvKoITbs 206169700766707391.457513910 +-9149673307721937905 2023-12-24 2023-11-24T07:36:12 YOz7doKd8 -1230363065 -834633725 true 820119153 6123912144157301048 4666766727400458333 -4716.2144 -7117038.367711 ermwmcL8pV 280086782055024317.701303595 +-9150115195774742116 2023-11-12 2024-01-02T20:29:53 LWQD -261215253 1768084753 true -558931806 -1056967781549996196 9131843493190501029 14410.191 6.36707420593269E8 W5TuIFqZKp 318409725651895142.166390154 +-9150932200757070087 2023-12-20 2023-09-28T09:08:30 M 1770455179 \N true -1360286619 -2064448775467328547 -4529213217563809484 -812.7237 5.741189108707E8 g5UnnI0W1z 962712866341557529.186206282 +-9151388286065637076 
2024-08-06 2023-11-25T19:00:40 DEoBc5lLpvGiKKKXQCJ -465910705 810322082 false -1419191640 -3182794298954746458 2747175217854426570 -27192.168 -6.70133246150463E8 rWPhTsJtqK -206699714562639006.568521300 +-9151581125716593042 2024-05-24 2024-07-16T11:51:23 F 1310106146 985949918 true 1444825572 8472932024323283565 -166016009100319192 25151.143 4.65537832338192E8 mOggwWAeLT -883052520444968683.952234231 +-9151702975453459606 2023-09-01 2024-05-21T19:05:59 cTQTm4g7Kk2vK2xCAP1 98580027 917464722 true -959429930 3334236995563629983 1958811267624042490 -10327.486 -1.528650012527071E9 yisXg7GNya -203510440031630488.979420419 +-9151945947862423645 2024-03-08 2024-07-27T20:55:45 ZhEgLT8wEsLBTZZEb664 1601772980 -1899001906 true -1318725867 -4565519934881429143 5489072191103584722 17158.146 -3.76889069289774E8 Jc0swzgyLF 385657282218447574.880186049 +-9152187709368715889 2023-10-27 2024-02-14T01:12:20 -1223869132 1839687488 \N -273237532 -1877825845137875733 2726691694014040238 -17147.723 -7.61507589040431E8 sblRIw6pEb 431580185626116579.343380019 +-9153904315526403422 2023-10-28 2023-09-30T20:10:53 YKMgq -596838303 -1239656773 true 292030167 -4067478167779612893 -8087123368763636110 \N -3.41985880019673E8 HClYuENX44 -674863236547677508.997659138 +-9154351313432685985 2024-03-01 2024-02-01T14:58:34 wgYp75Cgvg2if 1150733392 -555010928 false 2015632003 3058173653407834280 6256640695777285010 -19596.434 1.55304370003351E8 OsKxtzJmCj 220255835511219482.574827750 +-9154508291210231057 2024-03-10 2024-01-14T00:34:11 b4PYDzn -132967686 2009667575 false -1727334835 -6758540162654730744 3086561166520932824 -7327.0083 8.24020344920121E8 rz16XG3NoW 925625590335290615.778674156 +-9154893128271081542 2024-02-18 2024-01-21T12:36:55 DB -1437383137 640836605 false 92741274 9027850813178457656 1899761199320147697 17255.496 -1.751257267954188E9 hqozy54buD 835787087009434470.783958998 +-9154934919608885963 2023-09-21 2023-09-07T23:22:17 YG9xXprjOxS 204447351 1499709370 \N 2070218042 -3536511521323688620 -4096841570011305740 -15508.677 -9.36225634274846E8 VPYqzq9rqk -161170235949271245.244751163 +-9155427309848579791 2023-09-03 2023-10-10T05:55:28 GAwAK -663460475 1288343681 false -95494675 -181689227789066245 7843655796671909139 -3664.106 -1.016367896115953E9 6m7817xlxZ 268372281643006546.361527368 +-9155592271347869554 2024-07-10 2024-08-18T20:45:38 IkwtJylLj 1273053947 -753759250 false 2143202929 -380359274251270244 -5219807107366853321 17553.053 \N Ejr7MVtNgs 905699905607381002.126381118 +-9156310408883474946 2024-03-13 2024-08-12T19:29:40 \N -2023491912 1496163716 true -1333498188 -1436153233555042014 5777907742293691577 -676.4754 -1.010220684130373E9 VzzK5TIHbB -478268753826889506.513333456 +-9156508536758215726 2023-09-17 2023-09-19T01:15:24 Tfreyt6XLrN7Okl9 1320510470 2005799989 true 1774254035 -2442221478320421782 \N -10332.018 -4.1190209360853E7 rxyUDT3eTd -305820942417815226.818640779 +-9157037014209151535 2023-11-27 2024-08-22T09:56:27 n -1342288037 140025341 false -1747784924 4656644644997522337 -2410828532273841053 -20056.336 -1.367360799515886E9 MHZlbKUC8j 837246454372966891.134623480 +-9157071925643811873 2023-09-27 2024-03-03T01:22:04 XQ6rXY -877626140 1408940170 true 232181874 -3225649831156504389 -2254989194570549074 5992.9463 -1.649854905529787E9 nY0VkZfkf1 732652731108729957.562552251 +-9157463839567073950 2024-05-23 2024-02-03T13:41:37 LpveSzrzf5XqEjzv 201377167 902470301 false 268283037 \N 1958435623439351258 -11213.564 -1.912635348290802E9 2c9zNB9Idy -982466351077682161.362885288 
+-9158076247118602939 2024-08-20 2023-09-30T19:53:50 HGM5ZynlQpgSD0U9tT1 249712697 \N true 1259149010 7580876588400933087 -717732034553292522 -25168.906 1.316463558879732E9 v5LqihWgD6 33878467350711138.125261131 +-9158989203168465961 2024-04-16 2023-10-08T12:45:14 psjDnLtG 835782753 -2138654811 false -484384644 -4139490369683762615 -7178252759276862502 -9556.315 -3.16683205701635E8 \N -261326999580518047.365703483 +-9159182050775999401 2024-04-06 2023-12-24T00:24:46 xnK4WaAnWnE8 662381826 -1119025093 true 895896174 1127392443126423150 7681816549048443195 23635.244 -7.09312367330923E8 \N 578777910240398822.587066869 +-9159221664797065420 2024-06-12 2023-11-10T12:55:15 rOX47c -1490496240 -1717380954 false -1126062563 1699997332681716688 -5160795517982906437 26049.51 -5.90421970815173E8 pX2wTuCzJD 282128435657817931.166341803 +-9159261794797039482 2024-02-23 2024-05-09T02:27:50 XW9r5pNwQWMJxN -1125310783 1456116419 false -1839388135 7716067083090788801 4061387124328376272 26220.832 1.133823785937455E9 Gb2V8af9ho 959418027271157318.910485200 +-9159435878353534032 2023-12-12 2024-05-15T04:51:23 1544972871 1643612154 true 970542320 5077289327513480192 -3113339037579463501 -24211.51 2.07903521839089E9 HuoLG3Qxmp -258913472743666351.738351840 +-9160269209222577627 2024-07-12 2023-10-12T15:20:04 AcnR3cnrDKLPNU9JXg5 493753959 270926870 false -621191875 \N 9210165909736683384 8909.223 3.057165488693E8 YMs9tZfdSl -482467105818206247.592286898 +-9160285911727462411 2024-04-16 2024-05-20T03:09:14 nWOrH2fIIZw -2032566162 1485389377 true -126998418 5721943366702371139 -1064025567145698932 27986.172 -8.45380831495046E8 hzafTMcATS 469770838848025821.133362884 +-9160483021994234893 2023-12-12 2024-05-18T10:23:36 QVpwE3cZBLISB0 -1003734262 -57637658 false -921575064 -144632375653081446 7010338343812591895 -26117.803 -1.89660678320667E8 AxrmpATJfl 593061048701400973.337744197 +-9160824894662144330 2023-11-12 2024-04-13T16:16:21 PKmWEj85HClwgg2aqG -1286520519 -967797016 false -456075437 -9186285947703115808 2003752030007939249 -14271.705 -1.331404262980178E9 ykPNgV9WfO -976373841328928284.752871994 +-9160825549135641154 2024-01-31 2024-07-14T06:58:59 2134195957 1014552412 false 874056989 7584396933977278987 2783766504847685861 2567.0757 -9.61616339608928E8 6qbAjHlpEh 796637394452023369.954364985 +-9161068868503979351 2023-12-16 2024-06-02T03:02:32 4mDv5T0ya1eoWI2c 1353568034 1490780186 true 1267438938 4497402760954062797 -9081055940125119042 -16586.838 -1.186651155290272E9 9KIb5wnBWM 584365156627872882.312048790 +-9161118411918949865 2024-01-27 2023-10-12T09:48:44 mGjAjprYyYCxO9pRky -588456151 -776567006 false -2053785427 -1584214947064574241 486992055189850238 -22450.877 1.880230384040262E9 mdt7gs2coM -598737806906238470.210962165 +-9161146329000299692 2023-12-30 2024-05-29T21:01:28 u9 1029596039 -1345404142 false 1246462066 -384283147425974911 -3624218830327761065 -1332.1456 1.140076532884312E9 3LawCF3xBB -282515072008832966.494049916 +-9161341866303823131 2023-10-07 2024-02-08T18:37:17 7sP3n72RtTMaz65dnz 2027247327 1188386733 true \N -3763967246404160964 -8975961138439754761 -19615.34 -6.99342013193663E8 ukV5IlIQsB -297412146177838186.330539240 +-9161345456428669790 2024-06-11 2024-05-31T17:42:39 \N -264998123 2102072281 false -201084347 -6922450510798363666 -2596024027133430270 -29138.73 1.3662799671982E9 2IQxZbtoaX 554487384208629620.969092643 +-9161722787696057016 2023-09-24 2024-08-20T13:57:12 2K -988825511 365909840 true -1181989174 940164080920178635 340223766427291184 5221.3413 -2.070423226305711E9 
4okAhXXpJ0 919497933233420650.693359355 +-9161809015212190262 2024-03-26 2024-02-27T22:28:24 w2CB8dtXfJByJ -524950359 -443373917 true 1521310853 \N 3011872127652257364 17376.896 4.09129281859549E8 FTnipchn5d -976479185530752286.751254736 +-9162136101399540505 2024-04-29 2023-10-01T09:04:36 6cj -1481687884 -410894347 true 1841304750 -3184628844026135990 1853610216387854047 22632.145 \N 8uMkvriGw5 228623765858157275.720147638 +-9162611390279295666 2023-11-09 2024-08-22T08:48:21 -1720554246 1797131794 false 1243801313 -4612035051373109299 -5511642248867921798 21910.76 1.696498869303529E9 Hrk7AcFJXR 377744028347161391.744805232 +-9163325201180446020 2024-03-23 2024-02-10T07:09:04 XXFTl 1282976894 1772663829 true -1975053978 -269822391170365474 4525461236413520853 -13377.5 1.232833833464232E9 hdALL0Bx4v 938816757896026833.827169900 +-9163431522025479459 2024-05-29 2023-11-17T14:33:08 PDUgOe -1558530432 -531335513 false 1415413943 -6510419139200844610 4603576581492863894 6444.8755 -2.17447154304367E8 T1XnoYQxIG -609486480130540859.608064764 +-9163890339093306470 2023-12-13 2024-06-19T23:20:51 meksyGnnUMD -2015359062 1960205104 false 548748720 -8070591135298994783 -4721537622361213951 -69.279945 1.14448613151341E9 CRCwtMv8Yp -874338357328135482.389413570 +-9163968058857793987 2024-03-23 2023-09-07T07:21:42 416IOWGC5 -1061036419 310172232 true 1152002128 -4859791710180508668 -2089146857509284835 -30957.32 2.66198857167374E8 vMraLsqzyJ -925241550828516657.357445859 +-9164182408347446042 2023-09-02 2023-09-09T04:46:09 6NTgLzrE8Jv79t 1216061509 -74251157 false -722799398 5202570184192508827 9092408317696958136 -5584.251 1.431578595864882E9 aHCwUslpem -359652755221178891.246733524 +-9164201279673514480 2023-10-30 2024-06-17T21:28:42 04 581900931 -754689247 true -316019075 2595281764724268970 4651926177713878820 -30605.727 1.0256623162145E7 JfDElTszPY \N +-9164941274279621258 2023-12-24 2024-07-29T05:17:15 PjlNcL3lZJ6R7A1EuLn -1204918908 -1287724892 true 1825987063 2482782831300368221 -3325881061965045600 -14074.09 -7.1723419154252E8 TAaOkwOIga -542741707109482777.518649181 +-9165010820271855107 2024-04-30 2023-09-26T08:43:22 1 1405176619 750919966 false 700767861 7211285362557931369 -8056197897416990028 10846.105 -1.603738502697971E9 5G6n2j0TAn -15846092977474852.190519302 +-9165089984475091677 2023-10-06 2023-10-03T04:06:16 w -2021739246 -1446554671 \N -2058381327 4194532627266000238 7928669014410727873 -9838.862 -8.43165065231113E8 251OTacdvX -860600841707504995.801054990 +-9165318921817085165 2024-08-18 2023-11-11T23:21:52 Utt2zu4KkYK3Ff -1389371184 1344404877 true -194588315 7049809370802303152 -3002680588519035878 -695.7751 9.65763173331592E8 ONQdIE8Ep9 691001036430760963.532816893 +-9165452216982997830 2023-12-14 2024-07-01T07:41:18 -1336191908 -1983764432 true 1639945321 4930310813864544811 6380944825223713207 -19697.553 2.32902431978311E8 VqNn9czNGz -822736140052673568.203945053 +-9165589846323750835 2024-05-01 2024-07-19T14:11:31 -1090404616 1580106334 true -1747412530 8866290805121354674 -5668351098241171235 -19630.111 1.152665071989146E9 YvNp5R7yTh 28401941121550101.882075530 +-9165695475708627018 2023-10-20 2024-05-26T18:46:31 yRRg2811D 1973004630 \N true 1480416073 1931060086533068095 2270099968186605746 31160.05 -6.32823534900658E8 \N -501695245606825731.296394273 +-9165935755130226596 2023-10-11 2024-02-02T01:45:16 quB37JHJBHUOVwJhFtB 1254083034 -1613848676 true 982674485 -8383186052120340481 -2407186107204035012 -8365.993 -1.421818919119857E9 U6HMgAYToY -998593508262136327.173477870 
+-9166055510458394162 2024-01-11 2024-05-02T17:31:23 wXDygSdED4Ggk 509872762 -1257888145 false 49163776 -2020031447915971353 7347225840147566638 \N 1.484107785825817E9 cxsHP5Aeep 282785190475903003.947485345 +-9166117796284303748 2023-10-29 2024-01-08T06:26:29 B2HpWbqtE -352527460 -161435811 false 37242718 -2425560389820756371 \N -2982.8162 -1.118810768044281E9 rVeqRXx4l3 531895375068255788.696313763 +-9166336158079705818 2024-02-15 2024-07-21T06:04:33 08 -1286104896 1000495336 true -2000827816 4662548526680734577 -569535651412725594 -18694.271 -1.94816082317781E9 DnycJXD59G -382269458300861827.380106064 +-9166541735307650394 2024-05-22 2023-11-30T10:44:13 qLU 734180183 -618762 true 1947957721 2885187893059103098 3922295190874464553 31210.484 -4.22125044915728E8 OzBpzahw7p -394787860477876428.511389554 +-9166887209485414855 2024-01-03 2024-01-13T22:58:56 m04YfU0p -1193678155 190336166 false 1244766441 -6623721540217274038 -1332681785371754537 16939.895 -1.037032129134094E9 vcOkbNoSMi 105947852863484366.590961039 +-9167263053886108652 2024-08-18 2024-04-14T00:16:39 vvfKxoTYuO9Dne1er -1537371189 -844186036 false -2001052494 1994776122103507954 8934951638837713966 -3603.7217 -1.66488068644783E9 9Zj4W7IXyu 652264171261262713.180238185 +-9167294409224545869 2023-12-24 2024-06-21T01:02:10 KpuSz510lHCWn7nfQ7J 1068642952 83172458 false -1256878632 -5343618037429849703 -2861095484373568508 26040.277 7.65912831764588E8 nIxwhad5RA 886147081739302737.143385216 +-9167952855631948142 2024-02-10 2024-03-03T00:18:23 Mc39j 456655212 -1844627831 true -1200532219 -3711918520665809231 168317989999229078 1250.5652 7.88503675800098E8 p7MOIOFK5G -634981321810799338.382054375 +-9168040709685176517 2024-08-15 2023-12-05T13:05:02 if1GXS1lGANKdN -1393499439 1615067100 true -1818713332 \N -7078562843615947929 -8529.205 -1.858466280374226E9 hRKDNbbOeP -490656204173047595.868276155 +-9168370287441063377 2024-03-13 2024-05-24T02:35:19 4NqApYUp21ikjHOmLz 1942038348 -1546002746 false 838823728 \N -1120925190512850966 29305.02 -4.57369240538568E8 7wRAHkBmeb 229145313661160541.501247400 +-9168598330601364819 2024-07-17 2024-06-26T13:32:15 j1Y5qzPbhGlXIeE9 -770011561 -450602734 \N 378239875 7285148125792726825 -4885876550415960276 21440.707 7.6047242822792E8 GaIDggZMWN -122966115889243689.159256998 +-9168984648927389671 2024-03-05 2024-08-05T22:42:21 o7N05vH3CO23yEOjPN -1118249394 614054553 true -1353070254 303280508211386885 \N 3094.7007 -8.50920363509021E8 DGZmlHyCVO 381039867932974330.327888102 +-9169120947859953751 2023-09-17 2024-06-16T16:50:29 33LRFD9 207796435 -1604379855 true -2124048680 7872677851533553683 1382385795938285619 30422.326 8.83914138339881E8 zFk43x9etQ 105828830227133229.959934855 +-9169484932977318809 2023-11-09 2024-06-05T08:17 4iJnwofxRqzhaYexnem -1771787067 1486824707 true 1612162693 -2206088258623914913 2976391421742427715 -27814.209 3.07771315025632E8 8erk0ReD6k 583805570926901116.483894203 +-9169490197635386312 2023-10-25 2024-07-10T15:45:13 JxXzoYK4IXtvPNVuxi2C -1112070588 1405851111 true 2030125462 -1456607944316614360 3926647496224243073 6730.4517 -9.29687308506003E8 9iPs9TnUaq -284671697606772851.552081630 +-9169933180889021862 2023-09-28 2024-01-23T02:34:06 Bnw3BewK 17453673 \N true 2095829125 \N -2058822845338364708 32697.248 1.946496802782463E9 prIl4D8MxR -108418929226619929.709609942 +-9170428474810169101 2024-05-01 2024-08-14T06:47:40 yVbYFYCrtNYVwdJdBQ2V 126604778 -431667846 true 1466125403 -6946583814992611842 -1222053987570153094 23255.334 -2.081555167521523E9 Ap8s30hur8 
-818408048145052726.265663090 +-9170721631404088282 2024-04-13 2024-02-24T02:55:54 ON1 552038817 387836965 false -587981648 -2756900765133542777 788530383763854003 5679.5723 1.104468492857504E9 wRciuGXh9C 767404490145963252.889539610 +-9170740443418373834 2024-06-23 2024-05-29T22:48:34 P2N7E -1773479055 448257823 \N 978147729 2227728436011758091 2624735272027028956 -6999.4287 2.12030427797914E8 W1tPNixyXb -963128281251770786.684007092 +-9170821070141581473 2024-03-02 2023-10-19T13:40:25 Ivo186FSyUcDmGn4 -1646475049 1821175203 false 1381277020 3490924961477872669 7734268998046285123 4222.024 -3.81624761078142E8 t2zJbHMUyO 793349632074412093.313191623 +-9171029190884032916 2023-09-15 2023-12-01T04:32:04 vJ2jaTtKoyf5nl -733536544 -444742203 true -2120645159 -1735619884831804404 -7716646339557115950 -27090.402 -8.5018837698821E7 wmnTyDOiBe 535379496274356109.483817066 +-9171057064764627190 2024-03-26 2023-09-01T03:18:43 ZXleCqK80ZAK9Co 1393163340 1342012642 false -1290652543 -8666955538290196698 4692227573360026271 -4284.1665 -2.21394808814726E8 a3FrA4sLZA -356067234457122475.647980843 +-9171407215981931011 2024-03-24 2024-08-30T01:27:48 0nS6bkjAM5H9h 1934322789 801976771 false -1763172321 5797313905515959520 7491992753187356723 28215.701 -6.5524670007523E7 ojblCLQ5n1 -442193034690024627.880838015 +-9171503667151939478 2023-10-07 2024-02-06T19:06:09 gTyDaIWZ7KWzyfqP 831272259 -711856481 true -967208624 -2522225464355962143 2818307645021296485 \N -1.141389536596064E9 ZG7Nsbg4U2 422885530314463574.949347000 +-9171768727433949733 2024-02-21 2024-08-22T07:45:54 eZJzhl8d3Y -1489960178 -1159417942 false 410499188 -2043896227044249586 3900915024735152549 2512.0068 -2.127485395148053E9 V5pupcPktT 357467839117008183.917763734 +-9171860407960582174 2024-07-08 2023-12-24T02:24:26 0oHIdovs 1774940323 -781734183 false 102883600 2589878760660294063 2359766205880222697 \N -2.089310467540741E9 OQ4rhtSPqz -448636877169312449.555184200 +-9171929298260361394 2024-07-04 2024-07-08T03:26:53 \N -1085888137 1501833206 false 1319711337 -7442712983390575943 -1459424401247594536 -32439.473 -3.97921804621121E8 NY0cohqWPp 924432469191495904.430389236 +-9172136067319005232 2024-01-25 2024-02-29T09:48:36 3bACvx2eB2gYq7Xf 1824668633 -191253014 false 187378533 3233593418862674255 7750124345232098024 \N -1.690833107488973E9 IdSyabgLbZ -140496091373354815.218928094 +-9173269428640405952 2023-09-10 2024-08-13T12:35:17 78 -956063781 1120012451 true -1578604943 \N -2954718604091953482 26065.793 \N 4lEak4MbX3 979269359836327868.760631540 +-9173467709251484000 2023-09-13 2023-09-09T07:21:20 acWYd3kSWa95 -272608602 -20659414 true 1976847723 -1640219213649455969 1536697152802970670 2637.2744 1.758891891985969E9 SJAy9RLZMG 13242586974834499.678339292 +-9174150021104409703 2024-05-05 2023-10-29T10:35:13 QuXQvGCKv 332489972 -1855491292 true 1310462418 -1350916371969933338 332795249289653712 7491.5127 -6.04801758062445E8 JLvtlGrM0t -965900504533483501.285392564 +-9174212316512713948 2023-11-06 2024-03-11T17:31:49 5Z9B5S1oqO \N -2079972622 false \N -2070137366337523878 3708501560127508330 -28377.922 1.450900764823412E9 tgXbnbqHEU -700686834503690552.913687258 +-9174317991757665144 2024-08-24 2024-01-15T16:06:03 E9o5wSkvS7mGc 1437860615 1485951114 false 1566412038 1496370251686999165 565108517696695339 2693.592 -2.034020078192741E9 0OLAbK6CP4 359218549396142689.889454994 +-9174470393726355096 2024-06-26 2024-08-15T09:23:11 jRquX9YaPu8YX9 1587533715 1796968419 \N 1470669533 4524830257584786191 453982959664464192 17219.154 
-1.713989040613415E9 DSfhyIRtMd -286901856206590261.356752994 +-9174480247976278034 2024-02-01 2024-02-29T15:33:07 kIm0aepjKHNZDTE7X8 -336307360 1787665183 false -1198183620 -7618037150916744858 750003106951821713 \N 1.800493601005686E9 6nPA7jTJEp 892573353283851252.685191687 +-9174720301053874955 2023-09-13 2024-01-10T07:53:48 Nu 346280460 -1696554449 true 1556581480 96745145216325446 -5571230772306590829 17520.812 -2.085161056530872E9 p4GvjztgoG -693162080147643069.486652210 +-9175123909443023512 2024-07-29 2024-01-17T09:43:06 hh2 677851190 110968185 false 978364671 -2275957805535919695 3152104238655381130 32294.768 1.148993759063521E9 OjboeEFneG -65769141782178535.639433230 +-9175274876706435980 2024-03-13 2023-11-26T16:11:42 5Wy3ijm50bE7zpC 1644051871 1676895451 true -674040374 3116478221008662626 -320717091687443225 -22942.672 -7.51808901907586E8 SMBnb0Y50m 200036588005572409.866135851 +-9175416335770485238 2024-03-07 2024-08-04T22:30:39 0 2047974883 255577996 true 1100169022 7286178363221254607 -7356298047968862668 -12791.82 -2.02766440493574E9 HM3jit3CTr -46007780645735497.722603521 +-9175471697369336978 2024-05-10 2023-10-17T07:06:41 Q \N 1172173781 true \N 6431154903388071408 4478782575008546482 -11991.491 -7.0255734263236E7 5Ph9NXLo1b -941734907732268850.157745816 +-9175562384800289083 2024-05-03 2024-06-20T14:32:18 udpZhEq9ycX11xCKB4 -516118916 -954461794 true -1634863950 6020413781957327161 1808144279525740660 22887.955 6.49118836885026E8 EUfnZJ4l5d 544744077849034711.292322394 +-9175692223656292607 2024-08-16 2024-02-12T18:33:26 b8vq0wT5Ht2PBg 1588752085 1560617180 false -132113003 -2196200474504528892 301562848014745298 \N 3.48287043364751E8 fCiBQDOhAn -601924564341368064.988545436 +-9175806701666769661 2024-03-27 2024-05-10T19:13:06 spWHcj8K 1253917231 1428824800 true 1294434914 -4096579483623986090 -5005287405934783421 -15113.846 1.476547606488925E9 hAAQZ7jknZ -705790702901425187.347517347 +-9176510569059440393 2024-02-22 2023-12-25T22:56:43 zucp8j6Fo1k -857668949 -1537950895 \N -1717074245 -4395251094541030712 -361248974108111795 13553.303 1.673079002217587E9 KvgXS5MF1L -207124469894441943.540768975 +-9176993287374630267 2024-08-09 2024-05-21T08:48:50 897760214 265235350 true -554216213 -2763083732708219141 5489474405067234848 -31496.46 -7.03037835787957E8 vunZwLC9p1 186082827946516479.612230630 +-9177023321295734780 2023-10-27 2023-10-18T02:38:56 hHGss4peFH5qCYDb2 -580389106 1080169645 false -1395450218 -4201259415023984566 -6630270345637118800 -12700.361 -2.014509015795908E9 ppXzVSud1d -269173419890980430.215883223 +-9177025786419333593 2024-01-12 2024-07-08T19:01:26 YP -288192451 -1194749095 true 1485594391 \N 2945494655044535457 32536.63 -3.01439268322659E8 dRFQ6uhMD4 69649942034562155.622483352 +-9177389265785246868 2024-06-19 2024-05-18T08:02:23 n 368617645 977601455 false 772811753 -4467797487699422274 -1736297020216976143 19432.646 -2.053398545922751E9 jwLhXyyAXb \N +-9177411721595183810 2024-01-30 2024-03-03T22:04:52 hOOOq52ZpKZeZFx -181529784 914489372 false 1001569264 -5844129062851870598 -1885318715129651194 25568.455 -1.236520441841507E9 \N -981560892546319954.751926889 +-9177452157150164808 2024-08-03 2024-01-06T01:58:03 9ucihY7wOzad05 -983414602 -305791929 false 1393900898 4145921089371111412 -7864374347797694340 -14873.104 6.74882908333744E8 4IcvWjfiAW 902851257933708334.173509981 +-9177754021788791931 2023-11-04 2024-08-12T10:30:35 z 491456543 723570124 true -1683290096 -95334866566630902 365446774380406672 -10544.507 1.712797892331936E9 PQKkyoo72I 
237750281899911164.774540721 +-9177808152265680373 2023-11-07 2023-12-15T12:32:46 7hHwa7uY5xWi5xN -1593474070 764481391 false 873367353 7013521602314841804 -7911486591799344039 16798.188 -1.336756641796674E9 UKr2rmGQs1 -22737516692356917.714584351 +-9178487388640139200 2024-02-10 2024-07-21T15:05:34 ebYbZtqS6 \N 764456519 true 1811235828 -945192895788706428 90800083375681140 26762.758 \N ak2mQxoupK -235060980938041949.831301592 +-9178653628669402925 2024-01-31 2023-10-24T23:06:42 lZV6EbU94vDZimjOXl3 -2121077228 1307760642 true -2005671317 4740076179057732000 1198938220302783173 -2447.598 2.2298897063751E7 BDQeK4u1Ne -344293178067784159.579822541 +-9179464487231011664 2024-05-28 2023-09-06T00:05:25 osv5GdghoBufHaAX8AIW -1748940647 814802157 false \N -6610547857371651511 -9167121650948503843 30737.963 -1.439451885090049E9 nP6SXyinFM -660131616915552808.677365446 +-9180074192713047106 2024-01-31 2024-01-29T12:39:18 2x58Bqd2ZrCaHHUc3VyH -414818952 -1600448355 false -209741416 8671932090573022791 -3177162162932169005 -32299.088 -1.840513993701827E9 QmqTVSDB0e -130101713622174630.333113080 +-9180548923392059046 2024-02-15 2023-11-14T07:08:32 RVlp8MOrlW7 1898042368 1136558371 \N 1069587211 -3689401023420785299 -8062516340676077047 -25948.533 -1.481974196462466E9 MfbmMbcj8E 588823219683903969.953406512 +-9180726324935057642 2024-01-15 2024-07-01T11:47:10 PNunt 1185153372 -51473840 false 2102057747 956511386576192939 -7230374926040494295 5088.9604 5.85446540666136E8 VvAEfdRHQe -4399536147799593.630243012 +-9180870879934929165 2023-09-08 2024-01-23T16:53:22 aHanCwDxwOQ7YW1 467565878 968056601 false 1981581971 -4548451581582745304 -5858761871323862240 -8220.046 1.142078616555984E9 e507JbgBO6 319967982832009974.778391143 +-9181274887292747347 2024-08-19 2023-09-15T23:13:17 Z3jXb -1998333805 -40725760 true -1712362451 3925570203208742974 4559619898552225157 -25108.184 -1.903640805968051E9 ezp8CVBpgB -346942025495211016.518101824 +-9181974001839732316 2024-04-27 2024-05-26T19:45:39 \N -629141795 369372644 false -1854353575 -8289547530584606908 -2987687780972095386 -4315.604 1.740228131391749E9 Y8Re59Zn77 -232264047416150857.173650677 +-9182099635593422365 2023-10-16 2023-10-20T22:16:21 t9BjPiOmsX \N 2105256689 true 1176674397 -1307176187308884834 5559398616094093264 5081.0127 4.51916796745115E8 n2k2RxMa7Q 658924845935680514.760681844 +-9182135961491953580 2023-12-16 2023-12-03T16:40:01 mcM1g1MIdo -1999409621 1568876641 true -12768745 5062305570937775366 -3153734184345615026 -30285.828 3.21547799504241E8 trgSFTgKVo -518821451608914355.533633412 +-9183286882247151589 2024-03-31 2023-12-19T05:43:41 VkCUuIxapDmIpL -1921979808 600040008 true 671641204 5054817228082131022 -3669055007710790606 12449.544 -5.24723665972601E8 N4DoaDmcvm 696412061359263747.241279525 +-9183338149139263724 2023-12-04 2024-06-21T03:26:46 HX92txICUqo 1726466757 -1553734384 true -367277677 5221667338286333415 65777841454605421 -8058.5625 -2.053370488506376E9 tqTipxUmrz -11264944980961263.915793847 +-9183625146754392240 2024-04-01 2024-07-12T02:24:46 lZH7FbyfupAZ -339004860 794959877 false 1636982479 8548760936811469454 -8454213329074761728 -27402.418 \N v7fb6Iqf0P 390727565597485919.458810950 +-9183849813253378804 2023-12-31 2024-03-19T10:29:37 JZyqgTj8fl -842362675 1315983415 true -1090393528 5400771682480057646 -1851480908652749634 \N -1.74932309724181E8 n3pvqiXDCb 925423767744603021.154266037 +-9183878741545922593 2024-03-18 2024-08-12T04:10:17 y6aE 1890786758 1299180126 false 1516082859 1631673152228290079 -2860106499421696729 
11716.291 1.831184619592273E9 TX4Qu3v8c5 -889562463754546456.765666782 +-9184981760906422626 2024-02-23 2024-02-04T16:45:16 pD -67590976 1084808798 false 1990220680 -612848558892619807 -9116407135040668118 20089.979 -1.940575330478274E9 UysDEZ3nEZ -381404531313386066.897555039 +-9185038155517057746 2024-08-16 2024-06-02T23:04:46 vHbBUrBke -1509160780 -211796139 true 1910781613 7804813759530222248 -7746912856280602569 840.628 1.105703877860086E9 zSAPXHvVKX -611557591258665461.930776466 +-9185053059806043407 2024-05-24 2024-08-13T00:24:34 \N -649473297 1253300088 true 1927234648 \N -5934776291642307668 24225.53 -1.351021679520949E9 kv8szSlOCZ -655038048038494122.919425414 +-9185308203296258116 2023-12-21 2023-11-08T03:53:41 GKjByzrpVfpDs0Ks5m3D 347647155 671290234 true 355227657 \N 2496685822974279665 20495.896 5.55589112951481E8 \N \N +-9185567622501734695 2023-09-04 2024-07-10T01:16:19 RHC20v 1493832032 -772651923 false -152932611 6692032671250161812 7409987912566916488 -22076.475 5.38278677507059E8 CNwS6iDIyi 698281392715019998.636911561 +-9185961023786126138 2024-02-17 2023-12-18T13:04:29 e3nhfCo 1617523048 -2018705544 true 692950285 3374380867483911976 3761557216929036989 -25622.13 1.694464254956763E9 sQxGLPxZLw 514498615113336997.795802706 +-9186118597309947492 2023-12-27 2024-07-08T05:32:15 fPWQwPCIMwbaJsTanwk -535951519 -915390742 false -2028765848 -364112300751033109 -6608804530837508824 -21069.412 -1.78833422653044E9 xplzOYDZbY 188563114491648019.416909858 +-9186540071057413253 2023-11-28 2024-03-21T07:59:19 s2utEnZc4SzXo2QSQOH -418861325 -1759966676 true -1503911970 -837354689745497732 7419602204939944832 19027.389 4434610.330315 XHnbcBY2DG -622128456164982896.609736490 +-9186769628178188515 2023-11-05 2023-10-09T23:41:15 w7Y4pGa6MgEh -1956790543 -1583835696 false -1290121962 -2591505337682441425 7462022861397522971 1311.7084 2.01415000430076E8 xp9H7vB2pJ -628249821564302220.566533337 +-9186983278859456244 2024-04-06 2023-10-25T18:48:11 hqgwe57u -1806814859 778798835 true -1977296274 -4717267908265799274 629161312921603967 2812.6658 -2.13187327663274E8 xX64Ed8Po3 -902748629533263273.896504796 +-9187205630408599333 2023-12-28 2023-12-22T03:20:33 p3BrhmKi4iaXt41nZLIe 258661362 -355913273 false 1882269180 1902328331127398038 7376135903431932893 1642.6755 3.06612666719491E8 6ajuwazcHM -466126571161053235.167851700 +-9187266679276938585 2024-08-10 2024-04-06T04:05:04 zYnVZ3Qb9Tl3E -1146759807 -1501812902 false 1641175791 -4668243885124279883 5696675261379036531 4297.3843 1.084169887559058E9 Ug7Tk1iMp7 -933183271362385255.736951562 +-9187644224003139174 2023-12-23 2024-07-21T09:18:38 YnQEvdrH1YvdjzMasv -365323035 -1368320052 false 2047617352 \N 3788514820386015853 5387.875 -7.03636979330193E8 JS18tHC7qP \N +-9187695070740964873 2024-03-16 2024-04-03T06:10:41 KJpMU403JGquzHj6FVu -1668686754 -1001065118 false 808345686 -2049863197180676772 -6192197928629239706 28576.3 6.69599017125224E8 VGSzRqnGlJ -910704646583402990.312447525 +-9188344857972289820 2023-09-08 2023-11-21T09:58:22 HMUL11O -476783002 1566171138 false -546416696 5032825314086769864 -7789089050374841094 22282.605 8.22670840332375E8 V7901m9tRe -587041118159311553.819964229 +-9188351448311647697 2024-01-12 2024-04-27T21:33:04 JnMc -219311580 -1983164708 false 1573698809 3865422371576836053 7271597297162447581 14766.428 -1.828242951186497E9 5kaNI3Zhw1 -607056261807384722.456975900 +-9188780176191730292 2024-03-16 2023-09-26T03:53:35 rX6y2DXEV -293897235 1544555769 \N 1956295176 1169089110499913920 -2130026889007621312 30942.426 
1.05851211431844E9 8Zv2P1tEEy 602408858274078103.225889532 +-9188925965579178613 2024-08-13 2023-09-26T11:58:10 a5aGgAGHwNTpoFi 1541383280 969695116 true 2111683193 -2478625850850804703 -6695928550126912826 30885.533 -1.661621821448801E9 tMgpFMxu9f -361109332174504271.108581903 +-9189094931400403537 2023-12-24 2023-10-13T16:51:58 g -72239513 2050110231 true 1582216990 -1594665911539503569 5253635717751136541 -5713.0215 1.380953383112443E9 1xLMSvEQnM 880269738129582426.839863470 +-9189208936490268290 2023-09-29 2024-08-14T11:11:23 zad 895385640 -735890788 false \N 3456754813075170064 -8489592196880292048 24036.232 2.3648228103497E7 3WP91AFmTl -284023533956722086.820416326 +-9189327803808317186 2024-08-19 2024-02-29T15:18:44 v1MdqbDtNFGTS3FFERX -1402819586 -946491517 \N -283916438 1098882104860989069 -2715256801311382110 -19326.932 6.92438669082219E8 yKQ0YbOkTd 498522374778510564.990161742 +-9189994084488323214 2024-07-08 2023-11-02T11:02:51 gPKr 2135429225 -1954933666 \N 244967536 860903740667124180 3119229115734790809 -5090.9355 1.4647418892211E9 \N -349039090227645239.193912483 +-9190296667023569597 2024-05-17 2023-09-27T06:13:15 omRFCI4hcGTPJ4npa -1805055316 14538402 true 626125551 212003140391335911 -6977862811639859893 -7876.545 -1.125346635873957E9 RwGWDtITmY 878328773367501622.680363835 +-9190314802265452255 2023-10-12 2023-10-31T06:15:31 \N 1155735263 -2144228633 \N 344109059 -4474472344170089205 -6125922980786723150 23004.408 1.29029485093375E8 HK3BtlJ7mu 318342733401335218.695418011 +-9190631965935004459 2024-02-10 2024-08-30T17:11:52 0DAgvKlO4q3FrK -224418744 1983102563 true 605575330 4654377200965161242 1185573741480921773 -10699.571 \N KIRFjoa5f7 -59068045622171570.598709753 +-9190703787786464243 2024-06-28 2023-12-27T12:00:56 f5NT2ywJnIoE73sPKe 1782520904 -325340699 false 1651688504 3309972835649488722 5391211386699345737 10346.404 -1.491672366732499E9 f6B1pK331A -395881690548892606.891879745 +-9190931882142647811 2024-06-08 2024-08-17T14:22:26 ERKN7LWesRR4Qlw8Yw -1828669891 -190889343 true 2047553170 5386768147264230100 595010264980500459 -31456.062 -1.351675135336415E9 0LILJZuvz2 -949990748492508594.562125592 +-9192106530005127454 2024-07-11 2023-10-08T09:01:17 2nh -1695894549 -2091561694 true -1556248849 -3477170407476208176 3099000813588383350 24279.344 1.849269734701197E9 UxdW2pvEI9 -547636605101853958.381454967 +-9192205061426378119 2024-06-25 2023-10-27T03:32:11 T -695938968 905581019 true 1327406102 \N -1804073140349550397 -29011.36 1.781912604937556E9 8Wpm6Jw29U 201824091756197420.919845022 +-9192255433056489820 2024-04-14 2024-07-29T09:15:33 lXdTQqDEkl2NfzMjc00 1012046376 -435312663 false 1504159600 150047891531360054 1538636375657721767 7654.717 -1.874347223360686E9 NUYRBEX1fK 71786167356472754.521353233 +-9192416338944391527 2024-07-30 2023-12-10T20:52:46 ddVz3SUlIE 1891343612 590859506 false 123426564 -5121360526319580183 -7717761929590805464 896.9746 1.086003909244513E9 NAzj9UXTO7 -249920360529895983.743464312 +-9192614410476965803 2023-10-13 2024-06-29T15:40:31 tYLA4ECo -1438339998 2055866370 false 615733169 8282230276064941898 -2333462324437692281 -13766.414 1.622656354254882E9 wC1BmZbqNI 365067834330773298.892553053 +-9192819622543242937 2023-09-10 2023-09-21T17:28:47 N4DTEVGlKDUekCJiOXu 1060802468 -564261492 false -230122495 -7742524486374758766 869058425520923439 9623.977 1.92422377502072E8 FCNUsAlXz3 338254418163208039.252678345 +-9193790113423961007 2023-12-15 2024-01-17T18:44 0xGgs4iqbf0IQRhLT -1510660395 1802105943 false -1912009640 3059774549003533246 
-5468591009489582694 6759.6294 -1.015975795968139E9 zqCvmIR2CB 548268045730877751.567084123 +-9193862880526465279 2023-09-02 2023-10-07T12:35:41 rn0TL6HEWXfPF3P -814896262 97486940 true 527823410 -3681887944497348293 -3225934926344792016 22183.99 -1.396675675763258E9 P3sMF5pbej -583105531262360268.368867689 +-9193975595393901816 2023-12-22 2024-02-08T12:06:31 Ze3Clqi 819438270 1827271511 true 47719023 8033971114341554248 -7827184285608644036 32346.432 1.585385041279408E9 rFdcAezZxx -673496235962031006.641230648 +-9194420311196843511 2024-05-29 2024-06-12T13:34:46 616894301 1691789302 \N 661172380 5445669221279684504 -7518100017963693829 -27462.627 1.110773967224626E9 1hHyIiMvr9 -32944087260421279.771628574 +-9194438995886354061 2023-09-07 2024-07-20T06:11:34 PPTC7 -2128818151 \N false -2120892015 8568927886704760815 -3624488137444735450 30035.645 1.466833871197625E9 tKzQZsgDkn -506056419019212496.721973024 +-9194518760555128247 2024-03-14 2023-10-13T19:03:03 \N 1037844818 -1397244450 false -1029717731 -8964248262338613393 2604477574374042562 -29858.205 -1.840137612188602E9 \N 390302127595788192.569702533 +-9195348538345339952 2024-05-28 2023-09-26T14:06:45 3nW9LKhhzlmNkOXy 1756743971 -1419448006 true -437328661 \N 3001490662075049262 -16066.368 2.70649752295551E8 WOhIFfZIcF -275750074757702148.461250923 +-9195588520992739104 2024-01-29 2023-11-21T00:58:54 MgA 87539932 -393015533 true 447238828 8764364661193891878 4159953380181681930 21998.613 -1.161829772405384E9 \N 855399561179825379.258420321 +-9196641668894717133 2024-01-03 2024-06-19T15:55:41 ikWvMUEzz 1930607351 1273589107 true -26563835 -8708396745471042417 8951942479173794093 -15664.171 7.39197789079958E8 vxNVxp8vHK -686061645760392040.932658776 +-9196662275297568184 2024-08-14 2024-04-11T13:14:05 vtXjxeDG73SQ 761590567 -1167357030 true 602077254 8468976351895437635 -1822398119071600991 -21584.168 1.481850018412473E9 \N -229275247861585858.681110861 +-9196785236121321535 2024-06-01 2023-09-16T19:09:45 4IwpBBHmqfzly -1705274598 1767786676 true -621387148 1011080058560104227 -1159189753573185684 -31506.73 1.330570252279706E9 ZbHgAmkPja -661406111122748264.437513710 +-9197012296122253282 2024-02-27 2023-11-20T22:22:27 hId0d 532515186 -1758919759 true \N -2093939854112405727 -6666081100185760580 5745.271 1.876774799228748E9 bXEBe4qZwv -524359644302330054.501620405 +-9197015483363023219 2024-02-20 2023-10-18T05:39:03 5bHRaJBIbZYgf -636004134 107177515 false -459379008 -4439740408492110678 -866782065974442350 736.2361 -9.8159796929685E7 \N -364949599667749435.946673408 +-9197608958454830428 2024-08-24 2023-12-30T20:37:13 qxXM 1116208773 -915965557 false 1707626086 8951102301605075032 -5316065969126595644 14186.901 7.73304547444298E8 zp7FcCTQe1 -559223993794714489.591279798 +-9198401103961060461 2023-11-09 2024-02-01T23:41:12 zfd -1511618212 1635194532 false 1879550584 -5519982660969114412 -3382146971442213738 -9646.194 -1.593216929445349E9 vnht0fNeFH 656083351247166919.777986070 +-9198767894696203559 2024-06-06 2023-09-25T07:06:22 gU9fjldjB -1730697175 -167673852 false 1099031902 -3005076321516007565 6026317279844865861 32602.844 -7.9541825861114E7 Gg8Yp29Bdq 261333310650483987.211565247 +-9199324092517014251 2023-09-29 2023-10-07T11:39:48 rxxVLcbTBD 971064155 -270363888 false -1393927118 7461016291571083549 -4600324371363937181 28824.457 -1.722165431098112E9 7ZNn91Pccr 652540486260021563.678452015 +-9199415165927108520 2023-09-09 2024-07-02T08:47:41 5EG6 1256259012 -1373456574 false 1230520083 5574274675973334265 2833251364539246282 
13812.657 9.68367861071757E8 Krraf8fnR5 171166334365225105.390015501 +-9199429217351587626 2024-01-11 2024-06-15T15:55:43 GCoPUq3btJTUEUJ86XTZ 1421616400 -1012253776 true -106310821 1576686139861501312 8922416670133255685 -17183.623 5.31138719225756E8 FU4eDTYLJ7 286476795163388008.692100360 +-9199720290584831915 2024-03-24 2024-04-06T02:14:01 x8zOcrsUUVoL19E1T 1199947542 -1051122787 false -1055839216 6971510670939144005 -1842563676469646063 -13425.764 -5.24240828008945E8 975yAzJrlY 990746919583667538.179776119 +-9200068220955605670 2024-03-18 2023-12-12T22:33:58 \N -148857930 841762471 true -649489895 -7396540115412549060 756689851726267126 31327.428 1.829444367179228E9 2ggNa24J55 -888968231693056160.944412491 +-9200566647319903538 2024-06-29 2024-04-12T13:35:32 904979225 1389048765 false 102441049 -8540231863781498165 -478670212954939303 7557.7812 -5.11440451636404E8 YvPv9W05mK \N +-9201431039912298512 2023-10-26 2023-12-26T02:51:16 UDKJnqOsSvHw6JUe 104373635 1327533780 true 204229649 -9154658607143312434 5016875409471033333 -20696.523 -1.280520850303602E9 0QXqQT53Cy 168534371535082406.747883540 +-9201845583860070407 2023-09-06 2023-09-05T06:57:10 -1262886897 1196556155 true 1782859768 8812286808378340618 1415283319291159956 \N 4.15594101911135E8 Q52TdIiiu5 14357522171478668.811626702 +-9202386240289734812 2023-09-13 2024-01-13T10:00:59 SCfWonwh2mIkdJojZPl -1955966608 -1525874177 false -1342641788 -1487159520686459729 -8532395122275787375 24031.291 1.26544221450176E8 gGLfXTd2ol -677386165271614270.123528672 +-9202585224209244068 2023-12-21 2024-01-12T00:27:21 bxJsXrCujY2 -1049773906 722423879 false 179260560 7919014389179563183 -379205455655613712 -32199.023 1.36420701251463E8 prkmHUKR6h 951624714823865469.755004151 +-9202621820966279719 2024-04-24 2024-06-08T19:09:29 clvI7UTnCEdfh 1763858363 647761993 true 1502617022 2448706429132286230 -3835319304862338061 -27661.258 5.56829524219309E8 7kt86MR5ks 901829750455131189.989304407 +-9202938421943525165 2023-12-13 2024-01-31T21:20:52 L7TqM8wO 192279916 1882214947 true -481289613 -9114387565979757620 -3139473699425532467 27660.584 -1.205220675030983E9 NsSDptVldw 927006833562520062.686894931 +-9203129487982800586 2024-04-20 2023-09-28T11:51:22 H 436898209 -1034186756 false -747545871 4484115705668241925 6375869549314058505 733.16956 3.90668057718052E8 OGeQOeIcFG 969616342085123509.275457013 +-9203195362937708590 2024-07-23 2024-07-21T08:12:24 \N -1177107245 \N \N -1148906183562682422 3563233790895070226 -8087.5923 5.84140863033663E8 m8Qyvk267H -384644830050428386.845841546 +-9203589666836836594 2023-11-02 2024-02-27T08:31:37 BjT3fmeGu -1555638773 1437572281 false 1928633586 1880030201097065307 -5413053575071172793 31550.1 1.794680326183986E9 Nxj8UnvoAo 148416362716094235.761657143 +-9203751631981355876 2023-09-29 2023-12-22T04:42:36 3mwlKfZqTxKQTak -520747538 -1081392398 false 1382528439 3937459132038811617 3755214993891519982 5218.95 2.66179362715151E8 UW4PrLpdWi -905141182018465016.591268725 +-9205089538343864075 2024-08-20 2024-01-12T04:11:27 lKHuHR55A1yT9J4g -1320868018 1673624756 false 1517102997 1592842365230262730 -7121103089222205947 21029.916 -1.274102231957671E9 XVq9dYO1Mx -487672053550293238.269929959 +-9205209797594830519 2024-06-17 2023-09-20T22:46:07 eSk4LdVuLE -553964294 1823093304 false 280303976 3352836359809428248 5230992689393807670 430.3959 1.427528772104282E9 \N 940811533228655889.816860998 +-9205284027514471320 2024-07-02 2023-12-14T23:07:51 a84xcfT9HTbu 827171376 -546960411 false -377421651 -5805924813830363076 
7233782307902915571 1658.2771 -1.890038290043019E9 eQzEbbU130 552473491441428595.147306929 +-9205362766364831735 2023-11-03 2024-06-05T09:31:59 rgJsj4m9IPIw0 1988480891 611047623 true 1817325058 6250814052712808400 2628630402518522896 -27069.928 1.849001879561813E9 lG4zBOzziH -528701207671289426.620374901 +-9205688175629130674 2024-07-09 2024-03-21T06:45:59 AveuDBRc 1883001636 -420331030 false 945883068 1497280243919601055 8823204941418594648 -19925.902 -1.697397990954807E9 kJuwkoQtkU 477048212351277263.217163068 +-9205798436398869799 2024-01-05 2024-05-14T22:36 MNQPSX44 \N \N false 1451888746 1155057420215676071 -7436059527298060094 -13096.045 1.29608038275905E9 a74A2DD5yd 446969774530619699.927825415 +-9206767340787289009 2024-08-27 2024-01-23T20:42:35 q4VT9k0i -2016978952 -1450217420 true 351389822 -3942699743580196117 -6330894040101415428 -14474.354 -1.043674986401715E9 e6IqN6BEsS -146936812090346612.529972182 +-9207134704558162185 2024-07-22 2024-02-03T00:42:08 V130 970967784 1747318464 \N 2101676265 -8224485958173331064 -4060905149567034027 29801.291 3.89264581507014E8 cPdUsw6SwH 403829044415417453.722152774 +-9207156853249684435 2024-03-02 2024-03-13T00:34:25 P5WIoWrbnr3AmqG8bxpA 409116118 2011998752 \N 287402735 7087584559975083178 674840365692754793 21162.715 -1.552601730216824E9 \N 833131084289528123.331117856 +-9208115240504031338 2024-03-07 2023-12-05T07:44:25 E09jLcayjPDe7w1X -386351458 \N false 184612488 5235820525149692531 \N 7149.0664 1.576125390532337E9 VS1F1AZsjt -416357097864795441.616163626 +-9208153380646280242 2024-07-05 2024-02-07T01:26:31 yze50bLN96oTeG -487138984 2102135158 false 540854035 -5006211012662062378 -7348102901136142559 -4633.5024 -1.896929326166523E9 qMU9qVWuCI -935503151849178276.161561030 +-9208597174699759207 2024-01-17 2024-02-17T17:20:25 VRKAD30xek2nfFOP1x8b 639204755 1675651141 true 1760400470 -838509120683366410 3343906388340004060 6268.233 2.011362538817552E9 R4bUqwWNWj -833369321611182088.841699435 +-9208807398485723822 2024-01-19 2024-07-06T07:58:32 AYCcRlAv -1986812904 \N true 113199101 1665415717043014567 4018200745386490147 -12867.746 -1.228137158046407E9 QBnHJziSuc -592143508677570335.360492478 +-9208984632050465244 2024-07-24 2024-04-23T03:26:56 QOdNDZAr2hLegqkS3S -1743131417 360697198 false 1552708010 1092659706561719352 8343477659215655166 5417.26 -1.565302977528278E9 hAjV7FOSk3 \N +-9209930063936199201 2024-03-31 2023-09-24T16:34:28 -836790328 -933844265 true -715378381 -359790362883149935 -9053140555557613636 7898.7744 1.668540995429606E9 EBoVClTDnT 774261359151117227.245974278 +-9209932893563106270 2024-02-17 2024-01-23T12:29 iw4CnRSxsHH4w3foOd 1192706935 -1866099746 true -1756484947 3701507922980462739 8882561991814487963 -16179.618 1.188095782096582E9 SI4KSl7s8w -892689850182141395.315904257 +-9210223709372057825 2024-07-15 2023-12-03T12:48:40 xX44QeX 325026900 219374367 true 1290897242 3723078455352067133 -4419145089498170666 9827.3125 -1.160095328458917E9 wgp3IVu7JJ -731333566566033.186292120 +-9210438321099414643 2024-08-15 2024-08-02T16:56:34 d5ixNbX44YPqkNByQ1X \N -966737505 false 1917424361 -5379268161407866635 1145609757117837533 30977.174 -6.04181024046666E8 HtmNfdjbXu -453199888193657393.693053277 +-9210481938069798713 2024-03-26 2023-10-21T04:59:39 5 -1337560910 -365109389 true 189186711 -2942721142127778090 2007559089630789140 -2785.1565 -1.623974875547271E9 BBqq6w3toS -774792005979929521.857282801 +-9210676687256941863 2024-06-20 2024-04-26T21:26:51 Zv -885774306 607707371 true -195811901 -1172350616626111658 
4272424271471020132 -26129.605 -1.266297961560255E9 1H3KABS81r -975551179817424809.147090637 +-9210905455052085186 2024-08-24 2024-07-03T23:29:01 JX \N -1982819525 true -2043598190 -3880396530354399643 -4686565188792740641 12162.691 -2.63789106589356E8 algvwxFEec -295707734260789094.437863914 +-9210910724347450878 2023-11-23 2024-03-27T14:14:42 H6Neusf0t -957272887 637635314 false -1655881636 1723766118529448641 8211310770035398932 \N -5.7306221495245E8 YdekmlNo1Y -896833934970563478.221698896 +-9211004560218199479 2024-02-21 2024-06-12T16:03:02 UejbJDMbEDS2rE 919291725 -1500883707 true 119878560 -4713122851598324090 8235966303789637313 \N -1.222419439798664E9 Z7fJIpEKcz -771270215664834265.681108331 +-9211608846849618827 2023-10-20 2024-05-28T00:56:30 19NyMfLBVQHmwh1x2wMD 403157053 535434629 true -1998547224 -3126891140851388706 3192724683412042024 -8488.201 -4.75552596696089E8 soHkaZ7voO 733400976526798551.702913050 +-9211705256682410078 2024-06-25 2024-03-24T20:54:44 -271880837 1111758918 true 1620814961 7496186477282975961 1560358348080498663 -1226.3821 -1.0927009592206E8 jOcmkv3TLb 73786210787471104.677460178 +-9212592288817692752 2023-11-29 2024-08-09T03:53:37 63410598 -2077977590 true 2092057801 3741131526188726168 -7449937588187839187 26116.812 2.125434397709386E9 SRmszObQoe 529007534307915425.292183673 +-9212715351501833830 2024-02-14 2024-07-12T01:39:30 \N -1910279435 493759852 false 1544384017 7732612933198929910 5482735412140175467 15188.236 1.766458567689158E9 VkB9QNy2HP -242767979794600005.559609890 +-9214599354135012510 2023-10-15 2024-05-26T02:36:30 k 1524144269 -471011879 false 1880035082 7850086426595410424 7673679475570048809 17721.348 3.30544980513995E8 MGMYbY7Twh 455971780917807877.550981242 +-9214913714225531114 2024-07-16 2024-03-30T01:37:39 btw -110061679 604310336 \N 905797038 9145024441214448775 -7169224715389521752 -15784.189 -1.215474326929554E9 g0AacAXx0o -605156748826450426.692091246 +-9214934369558285447 2023-12-22 2024-01-27T18:31:14 8SwMI48PAASLNMp0 1611783442 1380045003 false -1450987068 -4835946252212438513 8154062831465956208 -3502.0137 4.24186018062715E8 \N 450386948927429428.661502413 +-9215277983021557448 2024-01-25 2024-04-21T01:01 9MVrw9wfMjpUxyZla -1029376655 -2123381823 false -1375531557 7727915340647359257 7317091577148329275 14299.566 -1.365743986499499E9 iHJCrByUw1 -785389577022382111.635569750 +-9215325396118543774 2023-11-27 2024-05-05T07:03:26 zZyEBs 941859681 1485416330 false \N -2443265032808648127 \N -2435.435 1.685745901622116E9 p0drYJAClz -74681302707275061.790564717 +-9216199167668578030 2023-12-29 2023-10-30T17:42:34 D5xd -1892825923 1822683474 true -1751127620 -8525491198762402859 -8996196607508044500 -3478.8928 -1.079318487439645E9 AB6gS6WqDd 456377995653772647.341724200 +-9216453729165126130 2024-02-11 2024-04-12T06:43:18 kCPuioGZ0N84juEr4VCI -772738147 2121165844 true -24260652 5058534776643251949 -5047885969521573028 -17393.287 -4.1960958251555E7 bhzSmCMeaW 906769487045397959.656749524 +-9217046390929661484 2024-02-27 2024-06-17T06:55:25 T1ZxqEdJkIeKj -1656268370 -36192836 true 1612974061 436388036813264080 -3242937141783976010 -21219.506 -1.224923436417099E9 mAAbJxmKPi -520148539609510022.930857729 +-9217065269856175483 2024-05-28 2024-08-27T12:00:58 hk0OZsroVb2 -1208293709 -1520507702 true 968821578 -4599070455406822086 -707609514538684092 1858.2369 7.87790218026362E8 U6knPUOsoU -430815278168912408.947478670 +-9217980683241799001 2024-05-23 2024-01-24T03:54:53 YQJNlaBHSQTPyUkcik 1028840146 154817272 true -133119771 
-1672553914789742622 -7043431389391986046 -11136.581 -1.44594390412514E9 HXzkZmZD8t \N +-9218398123636299706 2024-02-10 2024-02-11T02:25:23 F8X8 -1555442779 904213934 false -2128750766 6558592574321151794 4834956687450908674 8046.9727 -6.66108670559678E8 AyjFgWsqji 230824397867572504.137089271 +-9218621539845078186 2024-06-10 2024-03-29T12:11:25 kNjDiQoZDkPw5zm -989934173 1042793465 false \N 1533273927958705717 -5614062109690645019 -28591.03 1.532056900185374E9 u6osprWTsm 137168542730041923.781015683 +-9218681672966440142 2024-03-18 2023-12-02T05:09:03 A 2005455367 -898887229 true -428941120 924754646746996887 -3669603456744703028 \N \N XIvfGnAhtB 755736031301129405.449490820 +-9218707561525969914 2024-02-12 2024-03-03T02:33:09 vUD6in 24709012 1123749110 true -650771863 -5690449909574488469 -1508683740093959 -27516.291 8.70156013898118E8 5oo6HGw3vr 113525344488075660.854155527 +-9219237188076269518 2023-09-26 2024-08-05T21:21:11 c042khEL1lK8hUu3tCJ 1357777465 \N true 1068668689 -5969467072581459848 -3742793732165076726 -27471.016 -2.139119065960054E9 2IpZggMCQ1 -318108136285410452.677796918 +-9219627600811980057 2023-10-16 2024-04-10T22:10:33 6pUPRZM -372659163 -1643205713 true 1391797626 6483574890345200948 -8965612487285185999 -20016.977 -8.5790541124178E8 LsqU1p6DS3 \N +-9219760130111953324 2023-12-03 2024-06-11T07:55:02 mJl4LhQCK9bKhx1OOn3 -1737060993 679990434 false 952256564 -697616398016579070 -9043575510054098937 23595.43 -1.095161983815562E9 WrX7COvCZf 294636494722069563.368232660 +-9220101064984915720 2024-03-17 2023-11-20T19:36:41 21juyz2Fs5xQly6tbK3H 1055508631 -1613634324 false -1374618765 -6809195599606046989 -8295954004950149544 23471.564 1.03141898864332E9 8W1tUxjvCc 181935863619167336.733945780 +-9220625298471865289 2024-08-19 2023-10-21T06:11:42 4JCByPf5LHR \N 7510827 false 193879798 -5442623580797492771 \N 16837.385 1.925973363222939E9 q5uFZnD3b6 605915587835819314.278489397 +-9220840470776631571 2023-12-15 2023-08-31T20:34:12 4fIQ -1984935609 -49946462 true -1377292947 415187008938562752 -906603665780095309 260.8808 \N oYFewi062N 772614308244163956.849110100 +-9221048694475803853 2023-12-09 2024-05-29T17:05:59 DOkTF 716990436 281754321 false 2078657758 3948677820543858916 -748781835751437649 -16654.113 -6.38745488501596E8 vV3PvMI2d3 -316839048131557767.889504320 +-9221170749615322445 2024-04-10 2024-01-15T21:56:04 T5Af -1919846941 -794066337 true 96734189 -8229306330899982087 368597291693203562 -11996.822 -4.3208571286612E8 IhqVGiOx1G \N +-9222054472554220893 2023-12-07 2024-01-29T18:57:38 \N 417221243 473669340 false -1594489920 1141074138469186807 -5483494134164076485 \N -1.22427260034581E8 A8kpJsSXDX 480134338339769340.564758290 +-9222217180579919011 2024-01-30 2024-03-27T08:57:24 9gqePjXZpGEvAmTR 626995695 1985597517 false -347265729 1535297101897361306 -2658197523579176750 2530.3462 1.160892686307142E9 RcN0ZyAUAX 668106507137108086.383408520 +-9222424141766309735 2023-12-25 2024-02-01T03:28:59 yTM 1982864152 \N true -1827554857 -4906392448285957886 -7192727078901464246 -23235.805 -1.333731802671467E9 etzXdttVA9 -379173962271847232.147379474 +-9222555522754779148 2024-07-15 2023-10-15T15:59:08 v0B 178466867 1887066796 false -96766846 5712483288720265730 -583980391154750940 29348.494 \N Cgyuj0VUxe -497264908698463953.832712141 +-9222977483484913895 2023-10-27 2023-12-03T04:52:19 RAwmrKyExk -63154601 40048902 false \N -1517539584417033857 893378032192152033 21143.404 -9.01319056665119E8 OOKYeuxjwI -734481232313975.916622906 +-9223041722010267486 2024-06-19 
2024-08-14T17:07:21 DYec3uw -2074988865 1318144580 true -95713133 -5871272434790543067 3723183147091979164 11704.362 \N tnwT1goH6O -905886617532506879.817136572 +-9223060059729495686 2024-01-13 2024-03-08T17:37:03 uMp 728083059 348863789 false -1388036821 2360970742293300105 6666596030140300374 -24317.262 3.98534952578531E8 QH0sYiw5Uj \N +-9223074526313496060 2024-02-21 2024-03-24T06:40:13 4j -115580059 10337980 false -1734542079 -2500409058764693168 7363152679661858273 \N 1.853865193617529E9 oSb1zzSvxx 219041194932433259.458438430 +-9223360872545067013 2023-11-13 2023-12-28T21:56:33 TMrhYYhUekzEuwj7dxKg -1674469194 743691972 true -1241164453 -186491802511024435 -6362455035848615197 27456.645 -1.49335608778418E9 vEw8GSvve1 -258042404709384541.130390450 +-9223367835027742371 2024-05-03 2024-07-09T22:41:30 aZ -1075247014 1131556171 true 2113034866 -228151953571198558 7962970568051372694 25622.639 -6.40958918300861E8 C03lHZBiBx 676966551051266228.631762113 + +-- !select_cnt -- +60000 diff --git a/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out b/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out index ec2058ca46e0bd..acfe2adad2ec46 100644 --- a/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out +++ b/regression-test/data/external_table_p0/jdbc/test_jdbc_query_pg.out @@ -1358,9 +1358,9 @@ true abc def 2022-10-11 1.234 1 2 3 2022-10-22T10:59:59 34.123 true abc def 2022 6 -- !sql87 -- -1 3 -2 0 -3 1 +1 0 +2 1 +3 2 -- !sql88 -- 1 diff --git a/regression-test/data/external_table_p0/paimon/paimon_tb_mix_format.out b/regression-test/data/external_table_p0/paimon/paimon_tb_mix_format.out new file mode 100644 index 00000000000000..dca960f8c8785b --- /dev/null +++ b/regression-test/data/external_table_p0/paimon/paimon_tb_mix_format.out @@ -0,0 +1,45 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !order -- +1 2 a +2 2 a +3 2 a +4 2 a +5 2 a +6 1 a +7 1 a +8 1 a +9 1 a +10 1 a +1 2 b +2 2 b +3 2 b +4 2 b +5 2 b +6 1 b +7 1 b +8 1 b +9 1 b +10 1 b + +-- !order -- +1 2 a +2 2 a +3 2 a +4 2 a +5 2 a +6 1 a +7 1 a +8 1 a +9 1 a +10 1 a +1 2 b +2 2 b +3 2 b +4 2 b +5 2 b +6 1 b +7 1 b +8 1 b +9 1 b +10 1 b + diff --git a/regression-test/data/external_table_p0/tvf/test_hdfs_tvf.out b/regression-test/data/external_table_p0/tvf/test_hdfs_tvf.out index 68e310c5a4f71e..3f12b44858136c 100644 --- a/regression-test/data/external_table_p0/tvf/test_hdfs_tvf.out +++ b/regression-test/data/external_table_p0/tvf/test_hdfs_tvf.out @@ -615,3 +615,47 @@ string_col text Yes false \N NONE string_col text Yes false \N NONE tinyint_col tinyint Yes false \N NONE +-- !create_view -- +0 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +1 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +10 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +100 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +101 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +102 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +103 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +104 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +105 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +106 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +107 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +108 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +109 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +11 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +110 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +111 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +112 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +113 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +114 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 +115 2 3 4 5 6.6 7.7 8.8 abc def ghiaaaaaa 2020-10-10 2020-10-10 11:12:59 + +-- !alter_view -- +0 +1 +10 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +11 +110 +111 +112 +113 +114 +115 + diff --git a/regression-test/data/external_table_p0/tvf/upgrade/test.out b/regression-test/data/external_table_p0/tvf/upgrade/test.out new file mode 100644 index 00000000000000..12289c2571814a --- /dev/null +++ b/regression-test/data/external_table_p0/tvf/upgrade/test.out @@ -0,0 +1,45 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !create_view -- +0 +1 +10 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +11 +110 +111 +112 +113 +114 +115 + +-- !alter_view -- +0 +1 +10 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +11 +110 +111 +112 +113 +114 +115 + diff --git a/regression-test/data/external_table_p2/maxcompute/test_external_catalog_maxcompute.out b/regression-test/data/external_table_p2/maxcompute/test_external_catalog_maxcompute.out index e1479672f232e7..8e2dbfd52b2a51 100644 --- a/regression-test/data/external_table_p2/maxcompute/test_external_catalog_maxcompute.out +++ b/regression-test/data/external_table_p2/maxcompute/test_external_catalog_maxcompute.out @@ -139,3 +139,21 @@ yy=2023/mm=08/dd=05/pt=5 5 2023 08 05 5 2023 08 05 +-- !null_1 -- +1 1 +2 \N +3 \N +4 4 +5 \N +6 6 + +-- !null_2 -- +1 1 +4 4 +6 6 + +-- !null_3 -- +2 \N +3 \N +5 \N + diff --git a/regression-test/data/fault_injection_p0/cloud/test_cloud_mow_broker_load_with_retry.out b/regression-test/data/fault_injection_p0/cloud/test_cloud_mow_broker_load_with_retry.out new file mode 100644 index 00000000000000..9369fd5ae32f4f --- /dev/null +++ b/regression-test/data/fault_injection_p0/cloud/test_cloud_mow_broker_load_with_retry.out @@ -0,0 +1,7 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !select -- +19 + +-- !select -- +19 + diff --git a/regression-test/data/fault_injection_p0/cloud/test_cloud_mow_insert_with_retry.out b/regression-test/data/fault_injection_p0/cloud/test_cloud_mow_insert_with_retry.out new file mode 100644 index 00000000000000..f3a322f2b9f552 --- /dev/null +++ b/regression-test/data/fault_injection_p0/cloud/test_cloud_mow_insert_with_retry.out @@ -0,0 +1,8 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +1 1 1 +2 2 2 + +-- !sql -- +1 1 1 + diff --git a/regression-test/data/insert_p0/group_commit/replay_wal_restart_fe.out b/regression-test/data/insert_p0/group_commit/replay_wal_restart_fe.out new file mode 100644 index 00000000000000..5bce44e92ef4b8 --- /dev/null +++ b/regression-test/data/insert_p0/group_commit/replay_wal_restart_fe.out @@ -0,0 +1,4 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !select_1 -- +2 1 + diff --git a/regression-test/data/insert_p0/insert_group_commit_into.out b/regression-test/data/insert_p0/insert_group_commit_into.out index 8c0cc138c471fd..44e6a32fd56158 100644 --- a/regression-test/data/insert_p0/insert_group_commit_into.out +++ b/regression-test/data/insert_p0/insert_group_commit_into.out @@ -107,111 +107,3 @@ q 50 1 test 2 or --- !select1 -- -1 a 10 -2 b -1 -3 c -1 -4 \N -1 -5 q 50 -6 \N -1 - --- !select2 -- -1 a 10 -1 a 10 -2 b -1 -2 b -1 -3 c -1 -3 c -1 -4 e1 -1 -5 q 50 -5 q 50 -6 \N -1 -6 \N -1 - --- !select3 -- -1 a \N 10 -1 a \N 10 -1 a \N 10 -2 b \N -1 -2 b \N -1 -2 b \N -1 -3 c \N -1 -3 c \N -1 -3 c \N -1 -4 \N \N -1 -4 e1 \N -1 -5 q \N 50 -5 q \N 50 -5 q \N 50 -6 \N \N -1 -6 \N \N -1 -6 \N \N -1 - --- !select4 -- -2 b \N -1 -6 \N \N -1 - --- !select5 -- -1 a 10 5 -2 b -1 \N -2 b -1 \N -3 c -1 \N -4 \N -1 \N -5 q 50 6 -6 \N -1 \N -6 \N -1 \N - --- !select6 -- -1 a 10 -1 a 10 -2 b -1 -2 b -1 -2 b -1 -3 c -1 -3 c -1 -4 \N -1 -4 \N -1 -5 q 50 -5 q 50 -6 \N -1 -6 \N -1 -6 \N -1 - --- !select7 -- -\N -1 -\N -1 -\N -1 -\N -1 -\N -1 -\N -1 -\N -1 -a 10 -a 10 -a 10 -b -1 -b -1 -b -1 -b -1 -c -1 -c -1 -c -1 -q 50 -q 50 -q 50 - --- !sql -- -0 service_46da0dab-e27d-4820-aea2-9bfc15741615 1697032066304 0 3229b7cd-f3a2-4359-aa24-946388c9cc54 0 CgEwEiQzMjI5YjdjZC1mM2EyLTQzNTktYWEyNC05NDYzODhjOWNjNTQaggQY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAEY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAIY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAMY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZ
XlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAQY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAUY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAYY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAcY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAgY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdL
ZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAkY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAoY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAsY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAwY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA0Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWd
LZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA4Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA8Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBAY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBEY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBIY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YW
dLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBMY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5IixzZXJ2aWNlXzQ2ZGEwZGFiLWUyN2QtNDgyMC1hZWEyLTliZmMxNTc0MTYxNSo0c2VydmljZV9pbnN0YW5jZWFjODlhNGI3LTgxZjctNDNlOC04NWVkLWQyYjU3OGQ5ODA1MA== 1697032066304 36b2d9ff-4c25-49f3-a726-eea812564411 355f96cd-b1b1-4688-a5f6-a8e3f3a55c9a false 3 service_instanceac89a4b7-81f7-43e8-85ed-d2b578d98050 statement: b9903670-3821-4f4c-a587-bbcf02c04b77 ["[tagKey_5=tagValue_5, tagKey_3=tagValue_3, tagKey_1=tagValue_1, tagKey_16=tagValue_16, tagKey_8=tagValue_8, tagKey_15=tagValue_15, tagKey_6=tagValue_6, tagKey_11=tagValue_11, tagKey_10=tagValue_10, tagKey_4=tagValue_4, tagKey_13=tagValue_13, tagKey_14=tagValue_14, tagKey_2=tagValue_2, tagKey_17=tagValue_17, tagKey_19=tagValue_19, tagKey_0=tagValue_0, tagKey_18=tagValue_18, tagKey_9=tagValue_9, tagKey_7=tagValue_7, tagKey_12=tagValue_12]"] - --- !order -- -2023-06-10 cib2205045_1_1s 0.0000 168939.0 0.0000 0.0 0.0000 0.0 0.0000 day - --- !order2 -- -1 1 1 -2 2 2 -3 3 3 -4 4 4 - --- !select8 -- -1 test -2 or - diff --git a/regression-test/data/insert_p0/insert_group_commit_into_max_filter_ratio.out b/regression-test/data/insert_p0/insert_group_commit_into_max_filter_ratio.out index 241c03af460f11..bcb14bea525713 100644 --- a/regression-test/data/insert_p0/insert_group_commit_into_max_filter_ratio.out +++ b/regression-test/data/insert_p0/insert_group_commit_into_max_filter_ratio.out @@ -1,10 +1,4 @@ -- This file is automatically generated. 
You should know what you did if you want to edit this --- !sql -- -1 a 10 -2 \N -1 -3 a 10 -9 a \N - -- !sql -- 1 a 10 2 \N -1 diff --git a/regression-test/data/insert_p0/insert_group_commit_into_unique.out b/regression-test/data/insert_p0/insert_group_commit_into_unique.out index b672c6f4cde57e..2d91d1be970e28 100644 --- a/regression-test/data/insert_p0/insert_group_commit_into_unique.out +++ b/regression-test/data/insert_p0/insert_group_commit_into_unique.out @@ -87,91 +87,3 @@ 21 b 21 7 0 22 b 22 6 0 --- !sql -- -1 a 10 1 -2 b -1 0 -3 c -1 0 -4 \N -1 0 -5 q 50 0 -6 \N -1 0 -11 a 21 0 -12 b 22 0 -13 c 23 0 -14 d 24 0 -15 c 23 0 -16 d 24 1 -27 e 25 0 -110 a 211 0 -120 b 23 1 - --- !sql -- -2 b -1 0 -3 c -1 0 -4 \N -1 0 -5 q 50 0 -6 \N -1 0 -11 a 21 0 -12 b 22 0 -13 c 23 0 -14 d 24 0 -15 c 23 0 -27 e 25 0 -110 a 211 0 - --- !sql -- -1 a 10 10 1 -2 b 30 30 0 -3 c 30 30 0 -4 \N 70 70 0 -5 q 50 50 0 -6 \N 60 60 0 -11 a 21 21 0 -12 b 22 22 0 -13 c 23 23 0 -14 d 24 24 0 -15 c 23 23 0 -16 d 24 24 1 -27 e 25 25 0 -110 a 211 211 0 -120 b 23 23 1 - --- !sql -- -2 b 30 30 0 -3 c 30 30 0 -4 \N 70 70 0 -5 q 50 50 0 -6 \N 60 60 0 -11 a 21 21 0 -12 b 22 22 0 -13 c 23 23 0 -14 d 24 24 0 -15 c 23 23 0 -27 e 25 25 0 -110 a 211 211 0 - --- !sql -- -1 a 200 200 1 -2 b 30 200 0 -3 c 30 300 0 -5 q 50 500 0 -6 \N 60 600 0 -10 a 10 11 0 -11 a 11 12 1 -12 a 12 10 0 -13 a 13 10 0 -20 b 20 8 0 -21 b 21 7 0 -22 b 22 6 0 - --- !sql -- -2 b 30 200 0 -3 c 30 300 0 -5 q 50 500 0 -6 \N 60 600 0 -10 a 10 11 0 -12 a 12 10 0 -13 a 13 10 0 -20 b 20 8 0 -21 b 21 7 0 -22 b 22 6 0 - diff --git a/regression-test/data/insert_p0/insert_group_commit_into_unique_sync_mode.out b/regression-test/data/insert_p0/insert_group_commit_into_unique_sync_mode.out index b672c6f4cde57e..2d91d1be970e28 100644 --- a/regression-test/data/insert_p0/insert_group_commit_into_unique_sync_mode.out +++ b/regression-test/data/insert_p0/insert_group_commit_into_unique_sync_mode.out @@ -87,91 +87,3 @@ 21 b 21 7 0 22 b 22 6 0 --- !sql -- -1 a 10 1 -2 b -1 0 -3 c -1 0 -4 \N -1 0 -5 q 50 0 -6 \N -1 0 -11 a 21 0 -12 b 22 0 -13 c 23 0 -14 d 24 0 -15 c 23 0 -16 d 24 1 -27 e 25 0 -110 a 211 0 -120 b 23 1 - --- !sql -- -2 b -1 0 -3 c -1 0 -4 \N -1 0 -5 q 50 0 -6 \N -1 0 -11 a 21 0 -12 b 22 0 -13 c 23 0 -14 d 24 0 -15 c 23 0 -27 e 25 0 -110 a 211 0 - --- !sql -- -1 a 10 10 1 -2 b 30 30 0 -3 c 30 30 0 -4 \N 70 70 0 -5 q 50 50 0 -6 \N 60 60 0 -11 a 21 21 0 -12 b 22 22 0 -13 c 23 23 0 -14 d 24 24 0 -15 c 23 23 0 -16 d 24 24 1 -27 e 25 25 0 -110 a 211 211 0 -120 b 23 23 1 - --- !sql -- -2 b 30 30 0 -3 c 30 30 0 -4 \N 70 70 0 -5 q 50 50 0 -6 \N 60 60 0 -11 a 21 21 0 -12 b 22 22 0 -13 c 23 23 0 -14 d 24 24 0 -15 c 23 23 0 -27 e 25 25 0 -110 a 211 211 0 - --- !sql -- -1 a 200 200 1 -2 b 30 200 0 -3 c 30 300 0 -5 q 50 500 0 -6 \N 60 600 0 -10 a 10 11 0 -11 a 11 12 1 -12 a 12 10 0 -13 a 13 10 0 -20 b 20 8 0 -21 b 21 7 0 -22 b 22 6 0 - --- !sql -- -2 b 30 200 0 -3 c 30 300 0 -5 q 50 500 0 -6 \N 60 600 0 -10 a 10 11 0 -12 a 12 10 0 -13 a 13 10 0 -20 b 20 8 0 -21 b 21 7 0 -22 b 22 6 0 - diff --git a/regression-test/data/insert_p0/insert_with_null.out b/regression-test/data/insert_p0/insert_with_null.out index c4f2e363aa5beb..b170ccdc0872d1 100644 --- a/regression-test/data/insert_p0/insert_with_null.out +++ b/regression-test/data/insert_p0/insert_with_null.out @@ -56,23 +56,3 @@ -- !sql -- 22 \N \N --- !sql -- -1 "b" ["k1=v1, k2=v2"] -2 N ["k3=v3, k4=v4"] -3 null [] -4 NULL ["k5, k6"] -5 \N ["k7", "k8"] -6 \n ["k7", "k8"] -7 ["k7", "k8"] -8 ["k7", "k8"] -9 "a ["k7", "k8"] -10 
a" ["k7", "k8"] -22 \N \N - --- !sql -- -5 \N ["k7", "k8"] -22 \N \N - --- !sql -- -22 \N \N - diff --git a/regression-test/data/inverted_index_p0/count-on-index.json b/regression-test/data/inverted_index_p0/count-on-index.json new file mode 100644 index 00000000000000..6ef96d081c657a --- /dev/null +++ b/regression-test/data/inverted_index_p0/count-on-index.json @@ -0,0 +1,100 @@ +{"a": "2024-03-25", "b": "ISZHfv2OQ4", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2023-02-20", "b": "0MgsB3vcIf", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-11-13", "b": "tczLqYSRhY", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2023-08-13", "b": "f86oV0P4s8", "c": "c2", "d": "d2", "e": "e2"} +{"a": "2023-07-13", "b": "YHD5LihHpK", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2024-07-26", "b": "iSJxD3yKvH", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2023-12-25", "b": "aNiLbwF1vg", "c": "c1", "d": "d2", "e": "e2"} +{"a": "2024-04-04", "b": "DGih88jW0H", "c": "c3", "d": "d3", "e": "e2"} +{"a": "2024-03-30", "b": "kxqqsrD1RH", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2023-10-01", "b": "eySjxEnJvW", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2023-09-19", "b": "lHAJZpcky5", "c": "c2", "d": "d2", "e": "e1"} +{"a": "2023-11-25", "b": "PB4l4NsNAn", "c": "c1", "d": "d3", "e": "e2"} +{"a": "2023-05-10", "b": "TP5M3xQDCj", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2024-08-28", "b": "b1D8xLX4DN", "c": "c3", "d": "d2", "e": "e1"} +{"a": "2023-05-31", "b": "9gTsOoFITb", "c": "c2", "d": "d3", "e": "e2"} +{"a": "2023-01-14", "b": "rsv96RalgR", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2024-08-31", "b": "Uz67DnurlH", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-04-05", "b": "kPlM5F56kj", "c": "c2", "d": "d3", "e": "e2"} +{"a": "2023-09-18", "b": "wSGVCB6s3I", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-01-28", "b": "2ZpPp1y5G5", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-03-27", "b": "VwdMxBUnrc", "c": "c3", "d": "d1", "e": "e1"} +{"a": "2024-03-17", "b": "QPV71OyuZ2", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2023-08-10", "b": "pyhphs1Mj4", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2024-01-26", "b": "wxRO18q0EY", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2023-11-04", "b": "vfVK2TsjTl", "c": "c2", "d": "d1", "e": "e2"} +{"a": "2023-04-27", "b": "eS6vCuQAXP", "c": "c3", "d": "d2", "e": "e1"} +{"a": "2024-02-25", "b": "6dx8DMPK9f", "c": "c2", "d": "d3", "e": "e2"} +{"a": "2023-04-16", "b": "3aJhtwXa7E", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2024-07-08", "b": "Ue9xroXnHI", "c": "c3", "d": "d3", "e": "e2"} +{"a": "2023-08-19", "b": "gVPYCdN2eY", "c": "c2", "d": "d3", "e": "e2"} +{"a": "2024-02-24", "b": "qAaaKQpvd3", "c": "c2", "d": "d2", "e": "e1"} +{"a": "2024-01-10", "b": "XsNcGPnvvC", "c": "c2", "d": "d1", "e": "e2"} +{"a": "2023-03-04", "b": "BD7CEdzIhP", "c": "c2", "d": "d1", "e": "e1"} +{"a": "2023-03-18", "b": "sXESaouuHE", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2023-06-28", "b": "DD3RE2pufi", "c": "c2", "d": "d3", "e": "e2"} +{"a": "2024-04-07", "b": "RdEFKIz8QW", "c": "c2", "d": "d1", "e": "e2"} +{"a": "2024-05-10", "b": "u3OhzAL2LH", "c": "c3", "d": "d1", "e": "e1"} +{"a": "2024-07-20", "b": "U0n5EVKjPm", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2024-08-07", "b": "TXypE2ItVh", "c": "c3", "d": "d2", "e": "e1"} +{"a": "2024-08-08", "b": "8g3hPyCB2B", "c": "c3", "d": "d2", "e": "e1"} +{"a": "2023-01-05", "b": "aT6WlbObnZ", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-06-05", "b": "mVqMi8Rzfi", "c": "c1", "d": "d1", "e": "e2"} +{"a": "2024-02-12", "b": "HxpmQ0draG", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2024-05-20", "b": 
"R5a7gA61KY", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2023-04-19", "b": "QacPa5V0Fj", "c": "c3", "d": "d3", "e": "e1"} +{"a": "2023-07-29", "b": "dwT8GxkWDA", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2024-06-29", "b": "UCRkZWVEhK", "c": "c3", "d": "d1", "e": "e2"} +{"a": "2023-02-22", "b": "yMSAdFkaq9", "c": "c3", "d": "d1", "e": "e1"} +{"a": "2024-04-18", "b": "6Aa4VUyj7b", "c": "c2", "d": "d1", "e": "e2"} +{"a": "2023-09-20", "b": "xet5tOBGLy", "c": "c1", "d": "d1", "e": "e2"} +{"a": "2024-07-09", "b": "kyE5wM71uC", "c": "c1", "d": "d1", "e": "e2"} +{"a": "2024-03-05", "b": "J9UtyRClVj", "c": "c2", "d": "d2", "e": "e2"} +{"a": "2024-08-02", "b": "BnNajVStTq", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2024-03-20", "b": "YFyjBh6JeE", "c": "c1", "d": "d3", "e": "e1"} +{"a": "2024-01-25", "b": "kF462Dpave", "c": "c1", "d": "d1", "e": "e2"} +{"a": "2023-11-20", "b": "uUsipxur13", "c": "c1", "d": "d3", "e": "e2"} +{"a": "2024-08-06", "b": "g4i1sEGImS", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2024-02-09", "b": "NUz4tjvKt5", "c": "c3", "d": "d1", "e": "e2"} +{"a": "2024-04-20", "b": "p72Gn18tWd", "c": "c3", "d": "d3", "e": "e2"} +{"a": "2024-01-22", "b": "3DriXIqvSg", "c": "c2", "d": "d3", "e": "e2"} +{"a": "2024-07-26", "b": "rorCsbghiO", "c": "c1", "d": "d1", "e": "e2"} +{"a": "2023-10-26", "b": "XAWPiEQVmE", "c": "c3", "d": "d1", "e": "e1"} +{"a": "2023-10-30", "b": "L3FWcbrzen", "c": "c1", "d": "d2", "e": "e2"} +{"a": "2024-06-25", "b": "Lj6SZ26GJN", "c": "c3", "d": "d3", "e": "e1"} +{"a": "2023-07-20", "b": "U6nYzFhfwM", "c": "c2", "d": "d2", "e": "e2"} +{"a": "2023-06-26", "b": "J7jWtTmtZT", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2024-05-27", "b": "hDWYIRDHV4", "c": "c2", "d": "d2", "e": "e2"} +{"a": "2023-05-23", "b": "kvjedf4zF8", "c": "c3", "d": "d3", "e": "e1"} +{"a": "2023-02-06", "b": "RsPN2cGb2L", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2024-06-04", "b": "belvas0y6p", "c": "c3", "d": "d1", "e": "e2"} +{"a": "2024-06-17", "b": "J6vYAcFuGZ", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2024-04-18", "b": "qHuHh0Y29i", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2023-08-20", "b": "1GS5UtXMdz", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2024-01-23", "b": "gnjJ4TZ6A6", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2023-02-10", "b": "LX6ddQvIX2", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2024-03-02", "b": "MkImkgiAfm", "c": "c3", "d": "d1", "e": "e2"} +{"a": "2023-11-30", "b": "tGmu0DD8W0", "c": "c3", "d": "d3", "e": "e1"} +{"a": "2023-02-01", "b": "NgzjCOPAku", "c": "c1", "d": "d3", "e": "e2"} +{"a": "2023-01-03", "b": "5mWMiuYwbi", "c": "c3", "d": "d1", "e": "e2"} +{"a": "2023-02-12", "b": "9324TZqLjh", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-06-24", "b": "7fAYU4XSma", "c": "c1", "d": "d3", "e": "e1"} +{"a": "2024-08-28", "b": "iNvBMy8AB8", "c": "c1", "d": "d1", "e": "e1"} +{"a": "2024-06-08", "b": "nwJjFx21my", "c": "c3", "d": "d1", "e": "e1"} +{"a": "2023-03-15", "b": "Oonv8eGNIF", "c": "c3", "d": "d2", "e": "e1"} +{"a": "2024-04-22", "b": "6UYDEqQxxf", "c": "c2", "d": "d2", "e": "e2"} +{"a": "2024-05-08", "b": "aVPqpTufJf", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2024-08-02", "b": "z1baLbjnTj", "c": "c3", "d": "d3", "e": "e2"} +{"a": "2023-07-05", "b": "vMLMALySMJ", "c": "c3", "d": "d3", "e": "e1"} +{"a": "2023-08-30", "b": "s7VZKlOG27", "c": "c3", "d": "d3", "e": "e2"} +{"a": "2023-03-06", "b": "gWAuqLvHpJ", "c": "c2", "d": "d1", "e": "e2"} +{"a": "2024-02-24", "b": "2IQL6pazn3", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2024-08-15", "b": "4YbxyhwaRF", "c": "c3", "d": "d2", "e": "e1"} 
+{"a": "2023-02-28", "b": "wAOOLl8Kqj", "c": "c1", "d": "d2", "e": "e2"} +{"a": "2024-05-31", "b": "FTQKB8rURb", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2023-02-20", "b": "knNNhnMXLN", "c": "c1", "d": "d3", "e": "e2"} +{"a": "2024-07-01", "b": "cxA5xpl6NM", "c": "c2", "d": "d1", "e": "e2"} +{"a": "2023-05-10", "b": "5FnmdQtOA0", "c": "c1", "d": "d2", "e": "e1"} +{"a": "2024-08-21", "b": "ldggIaWfYF", "c": "c3", "d": "d2", "e": "e2"} +{"a": "2024-01-05", "b": "7lwOfCQs5o", "c": "c2", "d": "d3", "e": "e1"} +{"a": "2024-08-21", "b": "2zvnCMAkZG", "c": "c1", "d": "d1", "e": "e2"} diff --git a/regression-test/data/inverted_index_p0/test_count_on_index.out b/regression-test/data/inverted_index_p0/test_count_on_index.out index 59910b7fb5d6a8..3c0f47e7f8baf9 100644 --- a/regression-test/data/inverted_index_p0/test_count_on_index.out +++ b/regression-test/data/inverted_index_p0/test_count_on_index.out @@ -74,3 +74,6 @@ -- !sql -- 0 +-- !sql_bad -- +0 1 + diff --git a/regression-test/data/manager/test_manager_interface_1.out b/regression-test/data/manager/test_manager_interface_1.out index 0f50524d648565..0a432e959cc899 100644 --- a/regression-test/data/manager/test_manager_interface_1.out +++ b/regression-test/data/manager/test_manager_interface_1.out @@ -5,19 +5,19 @@ internal test_manager_metadata_name_ids \N -- !metadata_2 -- internal test_manager_metadata_name_ids test_metadata_name_ids --- !metadata_2 -- +-- !metadata_3 -- -- !tables_1 -- -k1 TINYINT Yes true \N -k2 DECIMAL(10, 2) Yes true 10.05 -k3 CHAR(10) Yes true \N BLOOM_FILTER -k4 INT No false 1 NONE -k5 TEXT Yes false \N NONE,BLOOM_FILTER +k1 tinyint Yes true \N +k2 decimal(10,2) Yes true 10.05 +k3 char(10) Yes true \N BLOOM_FILTER +k4 int No false 1 NONE +k5 text Yes false \N NONE,BLOOM_FILTER -- !tables_2 -- -test_manager_tb_1 DUP_KEYS k1 TINYINT TINYINT Yes true \N true - k2 DECIMAL(10, 2) DECIMALV3(10, 2) Yes true 10.05 true - k3 CHAR(10) CHAR(10) Yes true \N BLOOM_FILTER true - k4 INT INT No false 1 NONE true - k5 TEXT TEXT Yes false \N NONE,BLOOM_FILTER true +test_manager_tb_1 DUP_KEYS k1 tinyint tinyint Yes true \N true + k2 DECIMAL(10, 2) decimalv3(10,2) Yes true 10.05 true + k3 char(10) char(10) Yes true \N BLOOM_FILTER true + k4 int int No false 1 NONE true + k5 text text Yes false \N NONE,BLOOM_FILTER true diff --git a/regression-test/data/nereids_function_p0/agg_function/test_covar_samp.out b/regression-test/data/nereids_function_p0/agg_function/test_covar_samp.out index 728beed6cc42c2..9d0444d522face 100644 --- a/regression-test/data/nereids_function_p0/agg_function/test_covar_samp.out +++ b/regression-test/data/nereids_function_p0/agg_function/test_covar_samp.out @@ -1,6 +1,6 @@ -- This file is automatically generated. 
You should know what you did if you want to edit this -- !sql -- -1 +1.0 -- !sql -- -1.5 @@ -12,4 +12,8 @@ 4.5 -- !sql -- -1.666667 \ No newline at end of file +1.666666666666666 + +-- !notnull3 -- +1.666666666666666 + diff --git a/regression-test/data/nereids_function_p0/scalar_function/R.out b/regression-test/data/nereids_function_p0/scalar_function/R.out index e90736971184c1..a4fe94bbeb1fa2 100644 --- a/regression-test/data/nereids_function_p0/scalar_function/R.out +++ b/regression-test/data/nereids_function_p0/scalar_function/R.out @@ -429,6 +429,12 @@ xyz -- !sql_relace_empty08 -- abc +-- !sql_relace_empty09 -- +b你bab好bbb世bcb界b + +-- !sql_relace_empty10 -- +你a好b世c界 + -- !sql_right_Varchar_Integer -- \N 1 diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query1.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query1.out index 5d7945424490f3..3de47c9c458719 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query1.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject -----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF2 ctr_store_sk->[ctr_store_sk,s_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF1 s_store_sk->[ctr_store_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 ----------------------PhysicalProject ------------------------filter((store.s_state = 'TN')) ---------------------------PhysicalOlapScan[store] +--------------------------PhysicalOlapScan[store] apply RFs: RF2 ------------------hashAgg[GLOBAL] --------------------PhysicalDistribute[DistributionSpecHash] ----------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out index 7ba438a17299bf..16316c630a5d62 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / 
year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 1999) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 1999) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 1998) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1998) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out index b9db768ddeacaf..61fba77c876be0 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out @@ -97,16 +97,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject 
--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +120,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +143,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] 
----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query23.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query23.out index 480608162ed25e..5e2eafb60ffe8d 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query23.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] 
------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 7) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 7) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query30.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query30.out index 8fa4c693e96896..f1da603468bba0 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query30.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query30.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 
c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query31.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query31.out index cd14820e81832f..6ee14db3186630 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query31.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 1999)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 1999)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() 
build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 1999)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 1999)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 1999)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 1999)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query39.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query39.out index 147a805ba706ea..a4c06fd0323918 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query39.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out index 553c4daa7f32c9..57c89d80df9807 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( 
cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id,customer_id,customer_id] ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) --------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query41.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query41.out index c27e19cc9f2387..2dd4aadeae29a7 100644 --- 
a/regression-test/data/nereids_hint_tpcds_p0/shape/query41.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 744) and (i1.i_manufact_id >= 704)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query47.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query47.out index 1b0e2008159e0d..d2fce02a945701 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query47.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query57.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query57.out index a9ecbe3ebe3dda..da06fcebfa0dfe 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query57.out +++ 
b/regression-test/data/nereids_hint_tpcds_p0/shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category,i_category];RF8 i_brand->[i_brand,i_brand];RF9 cc_name->[cc_name,cc_name];RF10 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query64.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query64.out index 5428e0140904cb..9af385e063936f 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query64.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query64.out @@ -91,10 +91,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 1999)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2000)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out index 
ebb6f5a717b8c0..8b171914ebd371 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL) > if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL) > if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query75.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query75.out index 51355f21bc2517..c26b81b87791ba 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query75.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = 
prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 2002)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 2001)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query81.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query81.out index afbd05d111b8d6..465ebcbaaafba1 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query81.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query81.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/nereids_p0/hint/multi_leading.out b/regression-test/data/nereids_p0/hint/multi_leading.out index 08b6b83ed584f5..ce74020695d0c8 100644 --- a/regression-test/data/nereids_p0/hint/multi_leading.out +++ b/regression-test/data/nereids_p0/hint/multi_leading.out @@ -24,17 +24,17 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN colocated] hashCondition=((cte.c1 = t1.c1)) otherCondition=() -----------filter((t1.c1 > 300)) -------------PhysicalOlapScan[t1] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((cte.c1 = t2.c2)) otherCondition=() ------------filter((cte.c1 > 300)) --------------PhysicalOlapScan[t1] ------------filter((t2.c2 > 300)) --------------PhysicalOlapScan[t2] +----------filter((t1.c1 > 300)) +------------PhysicalOlapScan[t1] Hint log: -Used: leading(t1 t2 ) leading(t1 cte ) -UnUsed: +Used: +UnUsed: leading(t1 t2) leading(t1 cte) SyntaxError: -- !sql1_4 -- @@ -43,17 +43,17 @@ 
PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN colocated] hashCondition=((cte.c1 = t1.c1)) otherCondition=() -----------filter((t1.c1 > 300)) -------------PhysicalOlapScan[t1] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((cte.c1 = t2.c2)) otherCondition=() ------------filter((cte.c1 > 300)) --------------PhysicalOlapScan[t1] ------------filter((t2.c2 > 300)) --------------PhysicalOlapScan[t2] +----------filter((t1.c1 > 300)) +------------PhysicalOlapScan[t1] Hint log: -Used: leading(t1 t2 ) leading(t1 cte ) -UnUsed: +Used: +UnUsed: leading(t1 t2) leading(t1 cte) SyntaxError: -- !sql1_res_1 -- @@ -74,14 +74,14 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() -----------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = t2.c2)) otherCondition=() ------------PhysicalOlapScan[t1] ------------PhysicalOlapScan[t2] +----------PhysicalOlapScan[t3] Hint log: -Used: leading(t3 alias1 ) -UnUsed: +Used: +UnUsed: leading(t3 alias1) SyntaxError: -- !sql2_3 -- @@ -91,13 +91,13 @@ PhysicalResultSink ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t2.c2)) otherCondition=() -------------PhysicalOlapScan[t2] ------------PhysicalOlapScan[t1] +------------PhysicalOlapScan[t2] ----------PhysicalOlapScan[t3] Hint log: -Used: leading(t2 t1 ) -UnUsed: +Used: +UnUsed: leading(t2 t1) SyntaxError: -- !sql2_4 -- @@ -106,14 +106,14 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() -----------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t2.c2)) otherCondition=() -------------PhysicalOlapScan[t2] ------------PhysicalOlapScan[t1] +------------PhysicalOlapScan[t2] +----------PhysicalOlapScan[t3] Hint log: -Used: leading(t2 t1 ) leading(t3 alias1 ) -UnUsed: +Used: +UnUsed: leading(t2 t1) leading(t3 alias1) SyntaxError: -- !sql2_res_1 -- @@ -135,17 +135,17 @@ PhysicalResultSink ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = cte.c11)) otherCondition=() ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() -------------PhysicalOlapScan[t3] ------------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = t2.c2)) otherCondition=() --------------PhysicalOlapScan[t1] --------------PhysicalOlapScan[t2] +------------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((cte.c1 = t2.c2)) otherCondition=() -------------PhysicalOlapScan[t2] ------------PhysicalOlapScan[t1] +------------PhysicalOlapScan[t2] Hint log: -Used: leading(t2 t1 ) leading(t3 alias1 cte ) -UnUsed: +Used: +UnUsed: leading(t2 t1) leading(t3 alias1 cte) SyntaxError: -- !sql3_3 -- @@ -156,16 +156,16 @@ PhysicalResultSink --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = cte.c11)) otherCondition=() ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() ------------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t2.c2)) otherCondition=() ---------------PhysicalOlapScan[t2] --------------PhysicalOlapScan[t1] +--------------PhysicalOlapScan[t2] 
------------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((cte.c1 = t2.c2)) otherCondition=() ------------PhysicalOlapScan[t1] ------------PhysicalOlapScan[t2] Hint log: -Used: leading(t2 t1 ) -UnUsed: +Used: +UnUsed: leading(t2 t1) SyntaxError: -- !sql3_4 -- @@ -175,17 +175,17 @@ PhysicalResultSink ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = cte.c11)) otherCondition=() ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() -------------PhysicalOlapScan[t3] ------------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t2.c2)) otherCondition=() ---------------PhysicalOlapScan[t2] --------------PhysicalOlapScan[t1] +--------------PhysicalOlapScan[t2] +------------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((cte.c1 = t2.c2)) otherCondition=() -------------PhysicalOlapScan[t2] ------------PhysicalOlapScan[t1] +------------PhysicalOlapScan[t2] Hint log: -Used: leading(t2 t1 ) leading(t2 t1 ) leading(t3 alias1 cte ) -UnUsed: +Used: +UnUsed: leading(t2 t1) leading(t2 t1) leading(t3 alias1 cte) SyntaxError: -- !sql3_res_1 -- @@ -206,16 +206,16 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() -----------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = alias2.c2)) otherCondition=() ------------PhysicalOlapScan[t1] ------------hashJoin[INNER_JOIN broadcast] hashCondition=((t2.c2 = t4.c4)) otherCondition=() --------------PhysicalOlapScan[t2] --------------PhysicalOlapScan[t4] +----------PhysicalOlapScan[t3] Hint log: -Used: leading(t3 alias1 ) -UnUsed: +Used: +UnUsed: leading(t3 alias1) SyntaxError: -- !sql4_2 -- @@ -225,15 +225,15 @@ PhysicalResultSink ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = alias2.c2)) otherCondition=() +------------PhysicalOlapScan[t1] ------------hashJoin[INNER_JOIN broadcast] hashCondition=((t2.c2 = t4.c4)) otherCondition=() --------------PhysicalOlapScan[t2] --------------PhysicalOlapScan[t4] -------------PhysicalOlapScan[t1] ----------PhysicalOlapScan[t3] Hint log: -Used: leading(alias2 t1 ) -UnUsed: +Used: +UnUsed: leading(alias2 t1) SyntaxError: -- !sql4_3 -- @@ -245,13 +245,13 @@ PhysicalResultSink ----------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = alias2.c2)) otherCondition=() ------------PhysicalOlapScan[t1] ------------hashJoin[INNER_JOIN broadcast] hashCondition=((t2.c2 = t4.c4)) otherCondition=() ---------------PhysicalOlapScan[t4] --------------PhysicalOlapScan[t2] +--------------PhysicalOlapScan[t4] ----------PhysicalOlapScan[t3] Hint log: -Used: leading(t4 t2 ) -UnUsed: +Used: +UnUsed: leading(t4 t2) SyntaxError: -- !sql4_4 -- @@ -260,16 +260,16 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = t3.c3)) otherCondition=() -----------PhysicalOlapScan[t3] ----------hashJoin[INNER_JOIN broadcast] hashCondition=((alias1.c1 = alias2.c2)) otherCondition=() +------------PhysicalOlapScan[t1] ------------hashJoin[INNER_JOIN broadcast] hashCondition=((t2.c2 = t4.c4)) otherCondition=() --------------PhysicalOlapScan[t2] --------------PhysicalOlapScan[t4] -------------PhysicalOlapScan[t1] 
+----------PhysicalOlapScan[t3] Hint log: -Used: leading(alias2 t1 ) leading(t3 alias1 ) -UnUsed: +Used: +UnUsed: leading(alias2 t1) leading(t3 alias1) SyntaxError: -- !sql4_res_0 -- @@ -311,12 +311,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecGather] ------------hashAgg[LOCAL] ---------------hashJoin[INNER_JOIN shuffle] hashCondition=((t1.c1 = cte.c11)) otherCondition=() -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = cte.c11)) otherCondition=() ----------------PhysicalOlapScan[t1] +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) Hint log: -Used: leading(cte t1 ) -UnUsed: +Used: +UnUsed: leading(cte t1) SyntaxError: diff --git a/regression-test/data/nereids_p0/hint/test_hint.out b/regression-test/data/nereids_p0/hint/test_hint.out index 66a218b09fe45d..f7128e7d15cf26 100644 --- a/regression-test/data/nereids_p0/hint/test_hint.out +++ b/regression-test/data/nereids_p0/hint/test_hint.out @@ -40,12 +40,12 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = t2.c2)) otherCondition=() -----------PhysicalOlapScan[t2] ----------PhysicalOlapScan[t1] +----------PhysicalOlapScan[t2] Hint log: -Used: leading(t2 broadcast t1 ) -UnUsed: +Used: +UnUsed: leading(t2 broadcast t1) SyntaxError: -- !select1_6 -- @@ -54,12 +54,12 @@ PhysicalResultSink ----PhysicalDistribute[DistributionSpecGather] ------hashAgg[LOCAL] --------hashJoin[INNER_JOIN broadcast] hashCondition=((t1.c1 = t2.c2)) otherCondition=() -----------PhysicalOlapScan[t2] ----------PhysicalOlapScan[t1] +----------PhysicalOlapScan[t2] Hint log: -Used: leading(t2 broadcast t1 ) -UnUsed: +Used: +UnUsed: leading(t2 broadcast t1) SyntaxError: -- !select1_7 -- diff --git a/regression-test/data/nereids_p0/insert_into_table/random.out b/regression-test/data/nereids_p0/insert_into_table/random.out index d42426a991f801..dd5bdc8e1d9bb0 100644 --- a/regression-test/data/nereids_p0/insert_into_table/random.out +++ b/regression-test/data/nereids_p0/insert_into_table/random.out @@ -135,3 +135,6 @@ 13 12 20480.0 48640045.000000 10944010779 2012-03-12 2012-03-12T12:11:12 22.634 13 12 20480.0 48640045.000000 10944010779 2012-03-12 2012-03-12T12:11:12 22.634 +-- !sql_select -- +1 11 11 + diff --git a/regression-test/data/nereids_p0/sql_functions/aggregate_functions/test_aggregate_window_functions.out b/regression-test/data/nereids_p0/sql_functions/aggregate_functions/test_aggregate_window_functions.out new file mode 100644 index 00000000000000..6729ea26bc1005 --- /dev/null +++ b/regression-test/data/nereids_p0/sql_functions/aggregate_functions/test_aggregate_window_functions.out @@ -0,0 +1,822 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !agg_window_approx_count_distinct -- +beijing 3 +beijing 3 +beijing 3 +beijing 3 +chongqing 2 +chongqing 2 +hubei 3 +hubei 3 +hubei 3 +sichuan 2 +sichuan 2 +sichuan 2 +sichuan 2 + +-- !agg_window_count_by_enum -- +beijing [{"cbe":{"liuxiang":1},"notnull":1,"null":0,"all":1}] +beijing [{"cbe":{"liuxiang2":1,"liuxiang":1},"notnull":2,"null":0,"all":2}] +beijing [{"cbe":{"wangmang":2,"liuxiang2":1,"liuxiang":1},"notnull":4,"null":0,"all":4}] +beijing [{"cbe":{"wangmang":2,"liuxiang2":1,"liuxiang":1},"notnull":4,"null":0,"all":4}] +chongqing [{"cbe":{"lisi":1},"notnull":1,"null":0,"all":1}] +chongqing [{"cbe":{"lisi2":1,"lisi":1},"notnull":2,"null":0,"all":2}] +hubei [{"cbe":{"quyuan":1},"notnull":1,"null":0,"all":1}] +hubei [{"cbe":{"wangwu":1,"quyuan":1},"notnull":2,"null":0,"all":2}] +hubei [{"cbe":{"wangwu2":1,"wangwu":1,"quyuan":1},"notnull":3,"null":0,"all":3}] +sichuan [{"cbe":{"zhangsan":2},"notnull":2,"null":1,"all":3}] +sichuan [{"cbe":{"zhangsan":2},"notnull":2,"null":1,"all":3}] +sichuan [{"cbe":{"zhuge":1,"zhangsan":2},"notnull":3,"null":1,"all":4}] +sichuan [{"cbe":{},"notnull":0,"null":1,"all":1}] + +-- !agg_window_avg_weighted -- +1 4.787199468947368E8 +1 4.787199468947368E8 +1 4.787199468947368E8 +1 4.787199468947368E8 +1 4.787199468947368E8 +1 4.787199468947368E8 +1 4.787199468947368E8 +2 2.7563070204347825E8 +2 2.7563070204347825E8 +2 2.7563070204347825E8 +2 2.7563070204347825E8 +2 2.7563070204347825E8 +3 3.4926324084193546E8 +3 3.4926324084193546E8 +3 3.4926324084193546E8 +3 3.4926324084193546E8 +3 3.4926324084193546E8 + +-- !agg_window_corr -- +1 0.7619441542729813 +1 0.7619441542729813 +1 0.7619441542729813 +1 0.7619441542729813 +1 0.7619441542729813 +1 0.7619441542729813 +1 0.7619441542729813 +2 0.4220928990265549 +2 0.4220928990265549 +2 0.4220928990265549 +2 0.4220928990265549 +2 0.4220928990265549 +3 0.9986446796711458 +3 0.9986446796711458 +3 0.9986446796711458 +3 0.9986446796711458 +3 0.9986446796711458 + +-- !agg_window_covar_samp -- +1 9.266666666666666E10 +1 9.266666666666666E10 +1 9.266666666666666E10 +1 9.266666666666666E10 +1 9.266666666666666E10 +1 9.266666666666666E10 +1 9.266666666666666E10 +2 1.85E10 +2 1.85E10 +2 1.85E10 +2 1.85E10 +2 1.85E10 +3 1.5666666662333337E11 +3 1.5666666662333337E11 +3 1.5666666662333337E11 +3 1.5666666662333337E11 +3 1.5666666662333337E11 + +-- !agg_window_covar_pop -- +1 7.722222222222223E10 +1 7.722222222222223E10 +1 7.722222222222223E10 +1 7.722222222222223E10 +1 7.722222222222223E10 +1 7.722222222222223E10 +1 7.722222222222223E10 +2 1.48E10 +2 1.48E10 +2 1.48E10 +2 1.48E10 +2 1.48E10 +3 1.174999999675E11 +3 1.174999999675E11 +3 1.174999999675E11 +3 1.174999999675E11 +3 1.174999999675E11 + +-- !agg_window_variance_pop -- +1 2.2448979591836736E16 +1 2.2448979591836736E16 +1 2.2448979591836736E16 +1 2.2448979591836736E16 +1 2.2448979591836736E16 +1 2.2448979591836736E16 +1 2.2448979591836736E16 +2 1.36E16 +2 1.36E16 +2 1.36E16 +2 1.36E16 +2 1.36E16 +3 1.0399999990400002E16 +3 1.0399999990400002E16 +3 1.0399999990400002E16 +3 1.0399999990400002E16 +3 1.0399999990400002E16 + +-- !agg_window_stddev_pop -- +1 1.498298354528788E8 +1 1.498298354528788E8 +1 1.498298354528788E8 +1 1.498298354528788E8 +1 1.498298354528788E8 +1 1.498298354528788E8 +1 1.498298354528788E8 +2 1.16619037896906E8 +2 1.16619037896906E8 +2 1.16619037896906E8 +2 1.16619037896906E8 +2 1.16619037896906E8 +3 1.0198039022478783E8 +3 1.0198039022478783E8 +3 1.0198039022478783E8 +3 1.0198039022478783E8 +3 
1.0198039022478783E8 + +-- !agg_window_variance_samp -- +1 2.6190476190476192E16 +1 2.6190476190476192E16 +1 2.6190476190476192E16 +1 2.6190476190476192E16 +1 2.6190476190476192E16 +1 2.6190476190476192E16 +1 2.6190476190476192E16 +2 1.7E16 +2 1.7E16 +2 1.7E16 +2 1.7E16 +2 1.7E16 +3 1.2999999988000002E16 +3 1.2999999988000002E16 +3 1.2999999988000002E16 +3 1.2999999988000002E16 +3 1.2999999988000002E16 + +-- !agg_window_stddev_samp -- +1 1.618347187425374E8 +1 1.618347187425374E8 +1 1.618347187425374E8 +1 1.618347187425374E8 +1 1.618347187425374E8 +1 1.618347187425374E8 +1 1.618347187425374E8 +2 1.3038404810405298E8 +2 1.3038404810405298E8 +2 1.3038404810405298E8 +2 1.3038404810405298E8 +2 1.3038404810405298E8 +3 1.1401754245729032E8 +3 1.1401754245729032E8 +3 1.1401754245729032E8 +3 1.1401754245729032E8 +3 1.1401754245729032E8 + +-- !agg_window_group_bit_or -- +1 2044 +1 2044 +1 2044 +1 2044 +1 2044 +1 2044 +1 2044 +2 1020 +2 1020 +2 1020 +2 1020 +2 1020 +3 4088 +3 4088 +3 4088 +3 4088 +3 4088 + +-- !agg_window_group_bit_and -- +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +2 0 +2 0 +2 0 +2 0 +2 0 +3 128 +3 128 +3 128 +3 128 +3 128 + +-- !agg_window_group_bit_xor -- +1 1392 +1 1392 +1 1392 +1 1392 +1 1392 +1 1392 +1 1392 +2 708 +2 708 +2 708 +2 708 +2 708 +3 3912 +3 3912 +3 3912 +3 3912 +3 3912 + +-- !agg_window_bitmap_agg -- +1 100,200,300,1000,2000 +1 100,200,300,1000,2000 +1 100,200,300,1000,2000 +1 100,200,300,1000,2000 +1 100,200,300,1000,2000 +1 100,200,300,1000,2000 +1 100,200,300,1000,2000 +2 100,300,400,500,1000 +2 100,300,400,500,1000 +2 100,300,400,500,1000 +2 100,300,400,500,1000 +2 100,300,400,500,1000 +3 200,1000,2000,3000 +3 200,1000,2000,3000 +3 200,1000,2000,3000 +3 200,1000,2000,3000 +3 200,1000,2000,3000 + +-- !agg_window_bitmap_union_int -- +1 5 +1 5 +1 5 +1 5 +1 5 +1 5 +1 5 +2 5 +2 5 +2 5 +2 5 +2 5 +3 4 +3 4 +3 4 +3 4 +3 4 + +-- !agg_window_histogram -- +1 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +1 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +1 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +1 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +1 
{"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +1 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +1 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"200","upper":"200","ndv":1,"count":2,"pre_sum":1},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":5}]} +2 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":1},{"lower":"400","upper":"400","ndv":1,"count":1,"pre_sum":2},{"lower":"500","upper":"500","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4}]} +2 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":1},{"lower":"400","upper":"400","ndv":1,"count":1,"pre_sum":2},{"lower":"500","upper":"500","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4}]} +2 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":1},{"lower":"400","upper":"400","ndv":1,"count":1,"pre_sum":2},{"lower":"500","upper":"500","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4}]} +2 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":1},{"lower":"400","upper":"400","ndv":1,"count":1,"pre_sum":2},{"lower":"500","upper":"500","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4}]} +2 {"num_buckets":5,"buckets":[{"lower":"100","upper":"100","ndv":1,"count":1,"pre_sum":0},{"lower":"300","upper":"300","ndv":1,"count":1,"pre_sum":1},{"lower":"400","upper":"400","ndv":1,"count":1,"pre_sum":2},{"lower":"500","upper":"500","ndv":1,"count":1,"pre_sum":3},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":4}]} +3 {"num_buckets":4,"buckets":[{"lower":"200","upper":"200","ndv":1,"count":1,"pre_sum":0},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":1},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":2},{"lower":"3000","upper":"3000","ndv":1,"count":1,"pre_sum":3}]} +3 {"num_buckets":4,"buckets":[{"lower":"200","upper":"200","ndv":1,"count":1,"pre_sum":0},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":1},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":2},{"lower":"3000","upper":"3000","ndv":1,"count":1,"pre_sum":3}]} +3 {"num_buckets":4,"buckets":[{"lower":"200","upper":"200","ndv":1,"count":1,"pre_sum":0},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":1},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":2},{"lower":"3000","upper":"3000","ndv":1,"count":1,"pre_sum":3}]} +3 
{"num_buckets":4,"buckets":[{"lower":"200","upper":"200","ndv":1,"count":1,"pre_sum":0},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":1},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":2},{"lower":"3000","upper":"3000","ndv":1,"count":1,"pre_sum":3}]} +3 {"num_buckets":4,"buckets":[{"lower":"200","upper":"200","ndv":1,"count":1,"pre_sum":0},{"lower":"1000","upper":"1000","ndv":1,"count":1,"pre_sum":1},{"lower":"2000","upper":"2000","ndv":1,"count":1,"pre_sum":2},{"lower":"3000","upper":"3000","ndv":1,"count":1,"pre_sum":3}]} + +-- !agg_window_max_by -- +1 \N \N +1 100 123456789.0000000001 +1 1000 523456789.0000000005 +1 200 223456789.0000000004 +1 200 223456789.0000000004 +1 2000 523456789.0000000005 +1 300 423456789.0000000005 +2 100 323456789.0000000005 +2 1000 423456789.0000000005 +2 300 223456789.0000000005 +2 400 123456789.0000000005 +2 500 123456789.0000000006 +3 \N \N +3 1000 223456789.6000000005 +3 200 123456789.1000000005 +3 2000 323456789.1000000005 +3 3000 423456789.2000000005 + +-- !agg_window_min_by -- +1 \N \N +1 100 123456789.0000000001 +1 1000 123456789.0000000001 +1 200 123456789.0000000001 +1 200 123456789.0000000001 +1 2000 123456789.0000000001 +1 300 123456789.0000000001 +2 100 323456789.0000000005 +2 1000 323456789.0000000005 +2 300 323456789.0000000005 +2 400 323456789.0000000005 +2 500 323456789.0000000005 +3 \N \N +3 1000 123456789.1000000005 +3 200 123456789.1000000005 +3 2000 123456789.1000000005 +3 3000 123456789.1000000005 + +-- !agg_window_any_value -- +1 123456789.0000000001 +1 123456789.0000000001 +1 123456789.0000000001 +1 123456789.0000000001 +1 123456789.0000000001 +1 123456789.0000000001 +1 123456789.0000000001 +2 123456789.0000000005 +2 123456789.0000000005 +2 123456789.0000000005 +2 123456789.0000000005 +2 123456789.0000000005 +3 123456789.1000000005 +3 123456789.1000000005 +3 123456789.1000000005 +3 123456789.1000000005 +3 123456789.1000000005 + +-- !agg_window_percentile -- +1 5.23456789E8 +1 5.23456789E8 +1 5.23456789E8 +1 5.23456789E8 +1 5.23456789E8 +1 5.23456789E8 +1 5.23456789E8 +2 4.03456789E8 +2 4.03456789E8 +2 4.03456789E8 +2 4.03456789E8 +2 4.03456789E8 +3 4.03456789E8 +3 4.03456789E8 +3 4.03456789E8 +3 4.03456789E8 +3 4.03456789E8 + +-- !agg_window_percentile_array -- +1 [273456789, 423456789, 523456789] +1 [273456789, 423456789, 523456789] +1 [273456789, 423456789, 523456789] +1 [273456789, 423456789, 523456789] +1 [273456789, 423456789, 523456789] +1 [273456789, 423456789, 523456789] +1 [273456789, 423456789, 523456789] +2 [123456789, 223456789, 323456789] +2 [123456789, 223456789, 323456789] +2 [123456789, 223456789, 323456789] +2 [123456789, 223456789, 323456789] +2 [123456789, 223456789, 323456789] +3 [223456789, 223456789, 323456789] +3 [223456789, 223456789, 323456789] +3 [223456789, 223456789, 323456789] +3 [223456789, 223456789, 323456789] +3 [223456789, 223456789, 323456789] + +-- !agg_window_percentile_approx -- +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +2 4.234568E8 +2 4.234568E8 +2 4.234568E8 +2 4.234568E8 +2 4.234568E8 +3 4.234568E8 +3 4.234568E8 +3 4.234568E8 +3 4.234568E8 +3 4.234568E8 + +-- !agg_window_percentile_approx_weighted -- +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +1 5.234568E8 +2 4.234568E8 +2 4.234568E8 +2 4.234568E8 +2 4.234568E8 +2 4.234568E8 +3 4.234568E8 +3 4.234568E8 +3 4.234568E8 +3 4.234568E8 +3 4.234568E8 + +-- !agg_window_topn -- +1 
{"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +1 {"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +1 {"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +1 {"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +1 {"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +1 {"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +1 {"523456789.0000000005":3,"423456789.0000000005":1,"323456789.0000000002":1} +2 {"423456789.0000000005":1,"323456789.0000000005":1,"223456789.0000000005":1} +2 {"423456789.0000000005":1,"323456789.0000000005":1,"223456789.0000000005":1} +2 {"423456789.0000000005":1,"323456789.0000000005":1,"223456789.0000000005":1} +2 {"423456789.0000000005":1,"323456789.0000000005":1,"223456789.0000000005":1} +2 {"423456789.0000000005":1,"323456789.0000000005":1,"223456789.0000000005":1} +3 {"223456789.6000000005":2,"423456789.2000000005":1,"323456789.1000000005":1} +3 {"223456789.6000000005":2,"423456789.2000000005":1,"323456789.1000000005":1} +3 {"223456789.6000000005":2,"423456789.2000000005":1,"323456789.1000000005":1} +3 {"223456789.6000000005":2,"423456789.2000000005":1,"323456789.1000000005":1} +3 {"223456789.6000000005":2,"423456789.2000000005":1,"323456789.1000000005":1} + +-- !agg_window_topn_weighted -- +1 [523456789, 423456789, 323456789] +1 [523456789, 423456789, 323456789] +1 [523456789, 423456789, 323456789] +1 [523456789, 423456789, 323456789] +1 [523456789, 423456789, 323456789] +1 [523456789, 423456789, 323456789] +1 [523456789, 423456789, 323456789] +2 [423456789, 123456789, 223456789] +2 [423456789, 123456789, 223456789] +2 [423456789, 123456789, 223456789] +2 [423456789, 123456789, 223456789] +2 [423456789, 123456789, 223456789] +3 [423456789.2, 323456789.1, 223456789.6] +3 [423456789.2, 323456789.1, 223456789.6] +3 [423456789.2, 323456789.1, 223456789.6] +3 [423456789.2, 323456789.1, 223456789.6] +3 [423456789.2, 323456789.1, 223456789.6] + +-- !agg_window_topn_array -- +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +1 [523456789.0000000005, 423456789.0000000005, 323456789.0000000002] +2 [423456789.0000000005, 323456789.0000000005, 223456789.0000000005] +2 [423456789.0000000005, 323456789.0000000005, 223456789.0000000005] +2 [423456789.0000000005, 323456789.0000000005, 223456789.0000000005] +2 [423456789.0000000005, 323456789.0000000005, 223456789.0000000005] +2 [423456789.0000000005, 323456789.0000000005, 223456789.0000000005] +3 [223456789.6000000005, 423456789.2000000005, 323456789.1000000005] +3 [223456789.6000000005, 423456789.2000000005, 323456789.1000000005] +3 [223456789.6000000005, 423456789.2000000005, 323456789.1000000005] +3 [223456789.6000000005, 423456789.2000000005, 323456789.1000000005] +3 [223456789.6000000005, 423456789.2000000005, 323456789.1000000005] + +-- !agg_window_multi_distinct_count -- +1 5 +1 5 +1 5 +1 5 +1 5 +1 5 +1 5 +2 5 +2 5 +2 5 +2 5 +2 5 +3 4 +3 4 +3 4 +3 4 +3 4 + +-- !agg_window_multi_distinct_sum -- +1 11320987615.0000000119 +1 1617283945.0000000017 +1 3234567890.0000000034 +1 4851851835.0000000051 +1 
6469135780.0000000068 +1 8086419725.0000000085 +1 9703703670.0000000102 +2 1217283945.0000000026 +2 2434567890.0000000052 +2 3651851835.0000000078 +2 4869135780.0000000104 +2 6086419725.0000000130 +3 1093827157.0000000020 +3 2187654314.0000000040 +3 3281481471.0000000060 +3 4375308628.0000000080 +3 5469135785.0000000100 + +-- !agg_window_bitmap_union -- +1 1,2,3 +1 1,2,3 +1 1,2,3 +1 1,2,3 +2 2,3,4 +2 2,3,4 +2 2,3,4 +3 3,4,5,6 +3 3,4,5,6 +3 3,4,5,6 +3 3,4,5,6 + +-- !agg_window_bitmap_intersect -- +1 1 +1 1 +1 1 +1 1 +2 2 +2 2 +2 2 +3 +3 +3 +3 + +-- !agg_window_group_bitmap_xor -- +1 1,3 +1 1,3 +1 1,3 +1 1,3 +2 2,4 +2 2,4 +2 2,4 +3 3,4,5,6 +3 3,4,5,6 +3 3,4,5,6 +3 3,4,5,6 + +-- !agg_window_bitmap_union_count -- +1 3 +1 3 +1 3 +1 3 +2 3 +2 3 +2 3 +3 4 +3 4 +3 4 +3 4 + +-- !agg_window_collect_list -- +1 ["1,2", "1,2", "1,3"] +1 ["1,2", "1,2"] +1 ["1,2", "1,2"] +1 [] +2 ["2,3", "2,3", "2,4"] +2 ["2,3", "2,3"] +2 ["2,3", "2,3"] +3 ["3", "4", "5", "6"] +3 ["3", "4", "5"] +3 ["3", "4"] +3 ["3"] + +-- !agg_window_collect_set -- +1 ["1,2"] +1 ["1,2"] +1 ["1,3", "1,2"] +1 [] +2 ["2,3", "2,4"] +2 ["2,3"] +2 ["2,3"] +3 ["3", "4"] +3 ["3"] +3 ["5", "3", "4"] +3 ["5", "3", "6", "4"] + +-- !agg_window_array_agg -- +1 [null, "1,2", "1,2", "1,3"] +1 [null, "1,2", "1,2"] +1 [null, "1,2", "1,2"] +1 [null] +2 ["2,3", "2,3", "2,4"] +2 ["2,3", "2,3"] +2 ["2,3", "2,3"] +3 ["3", "4", "5", "6"] +3 ["3", "4", "5"] +3 ["3", "4"] +3 ["3"] + +-- !agg_window_group_concat -- +1 \N +1 1,2,1,2 +1 1,2,1,2 +1 1,2,1,2,1,3 +2 2,3,2,3 +2 2,3,2,3 +2 2,3,2,3,2,4 +3 3 +3 3,4 +3 3,4,5 +3 3,4,5,6 + +-- !agg_window_sum0 -- +1 12 +1 12 +1 12 +1 12 +2 16 +2 16 +2 16 +3 25 +3 25 +3 25 +3 25 + +-- !agg_window_group_array_intersect -- +1 [] +1 [] +1 [] +1 [] +2 ["2"] +2 ["2"] +2 ["2"] +3 ["3", "4"] +3 ["3", "4"] +3 ["3", "4"] +3 ["3", "4"] + +-- !window_func_hll_union_agg -- +beijing linux 3 +beijing macos 3 +beijing windows 3 +hebei windows 1 +jiangsu macos 1 +shanghai linux 2 +shanghai windows 2 +shanxi windows 1 +shanxi windows 1 + +-- !window_func_hll_union -- +beijing linux 3 +beijing macos 3 +beijing windows 3 +hebei windows 1 +jiangsu macos 1 +shanghai linux 2 +shanghai windows 2 +shanxi windows 1 +shanxi windows 1 + +-- !map_agg -- +1 {"LA":"V1_1", "LB":"V1_2", "LC":"V1_3"} +1 {"LA":"V1_1", "LB":"V1_2", "LC":"V1_3"} +1 {"LA":"V1_1", "LB":"V1_2", "LC":"V1_3"} +2 {"LA":"V2_1", "LB":"V2_2", "LC":"V2_3"} +2 {"LA":"V2_1", "LB":"V2_2", "LC":"V2_3"} +2 {"LA":"V2_1", "LB":"V2_2", "LC":"V2_3"} +3 {"LA":"V3_1", "LB":"V3_2", "LC":"V3_3"} +3 {"LA":"V3_1", "LB":"V3_2", "LC":"V3_3"} +3 {"LA":"V3_1", "LB":"V3_2", "LC":"V3_3"} +4 {"LA":"V4_1", "LB":"V4_2", "LC":"V4_3"} +4 {"LA":"V4_1", "LB":"V4_2", "LC":"V4_3"} +4 {"LA":"V4_1", "LB":"V4_2", "LC":"V4_3"} +5 {"LA":"V5_1", "LB":"V5_2", "LC":"V5_3"} +5 {"LA":"V5_1", "LB":"V5_2", "LC":"V5_3"} +5 {"LA":"V5_1", "LB":"V5_2", "LC":"V5_3"} + +-- !agg_window_quantile_union -- +20220201 0 1.0 +20220201 1 1.0 + +-- !agg_window_retention_0 -- +1 [1, 1] +2 [1, 0] +3 [0, 0] +4 [0, 0] +5 [1, 1] + +-- !agg_window_retention_1 -- +1 [1, 1] +1 [1, 1] +2 [1, 0] +2 [1, 0] +3 [0, 0] +3 [0, 0] +4 [0, 0] +5 [1, 1] +5 [1, 1] +5 [1, 1] + +-- !agg_window_sequence_match -- +1 true +1 true +1 true +1 true +1 true +1 true +1 true +1 true +2 false +2 false +2 false +2 false +2 false + +-- !agg_window_sequence_count -- +1 2 +1 2 +1 2 +1 2 +1 2 +1 2 +1 2 +1 2 +2 0 +2 0 +2 0 +2 0 +2 0 + +-- !agg_window_sum_foreach -- +1 [1, 2, 3] +2 [20] +3 [100] +4 \N +5 [null, 2] + +-- !agg_window_sum_foreach2 -- +1 \N +2 [1, 2, 3] +3 [21, 2, 3] 
+4 [120] +5 [100] + +-- !agg_window_covar_foreach -- +1 [0, 0, 0] +2 [0] +3 [0] +4 \N +5 [null, 0] + +-- !agg_window_group_concat_state1 -- +1 string1,string1,string2,string2 +1 string1,string1,string2,string2 +1 string1,string1,string2,string2 +1 string1,string1,string2,string2 +2 string3,string3 +2 string3,string3 + +-- !agg_window_group_concat_state_merge -- +1 string1,string1,string2,string2 +1 string1,string1,string2,string2 +1 string1,string1,string2,string2 +1 string1,string1,string2,string2 +2 string3,string3 +2 string3,string3 + +-- !agg_window_orthogonal_bitmap1 -- +1 1,2 +1 1,2 +1 1,2 +2 2,3 +2 2,3 +2 2,3 + +-- !agg_window_orthogonal_bitmap2 -- +1 2 +1 2 +1 2 +2 2 +2 2 +2 2 + +-- !agg_window_orthogonal_bitmap3 -- +1 4 +1 4 +1 4 +2 5 +2 5 +2 5 + +-- !agg_window_window_funnel -- +100123 2 +100123 2 +100123 2 +100123 2 +100123 2 +100125 3 +100125 3 +100125 3 +100126 2 +100126 2 +100127 2 +100127 2 + diff --git a/regression-test/data/nereids_p0/sql_functions/datetime_functions/test_date_function.out b/regression-test/data/nereids_p0/sql_functions/datetime_functions/test_date_function.out index 8c256e42d57f0d..2aef8a1257aeb5 100644 --- a/regression-test/data/nereids_p0/sql_functions/datetime_functions/test_date_function.out +++ b/regression-test/data/nereids_p0/sql_functions/datetime_functions/test_date_function.out @@ -147,6 +147,12 @@ -- !sql -- 2009-10-04 +-- !sql_date_format_long -- +\N + +-- !sql_date_format_long -- +\N + -- !sql -- 2008-11-30T23:59:59 @@ -476,6 +482,12 @@ February -- !sql -- 1 2022-08-01 17:00:31 +-- !sql -- +1 \N + +-- !sql -- +1 \N + -- !sql -- true @@ -494,6 +506,9 @@ true -- !sql_date_format_long -- \N +-- !sql_date_format_long -- +\N + -- !sql -- \N diff --git a/regression-test/data/nereids_p0/sql_functions/math_functions/test_conv.out b/regression-test/data/nereids_p0/sql_functions/math_functions/test_conv.out index e05c1a3437ae86..95e67e726d6ac5 100644 --- a/regression-test/data/nereids_p0/sql_functions/math_functions/test_conv.out +++ b/regression-test/data/nereids_p0/sql_functions/math_functions/test_conv.out @@ -11,3 +11,6 @@ -- !select4 -- 18446744073709551615 +-- !select5 -- +1 1.464868 + diff --git a/regression-test/data/nereids_p0/sql_functions/string_functions/test_string_function.out b/regression-test/data/nereids_p0/sql_functions/string_functions/test_string_function.out index e8305c284ff520..d85794989f7de0 100644 --- a/regression-test/data/nereids_p0/sql_functions/string_functions/test_string_function.out +++ b/regression-test/data/nereids_p0/sql_functions/string_functions/test_string_function.out @@ -386,3 +386,63 @@ tNEW-STRorigin str -- !sql -- d***is +-- !sub_replace_utf8_sql1 -- +你a世界 + +-- !sub_replace_utf8_sql2 -- +你ab界 + +-- !sub_replace_utf8_sql3 -- +你ab + +-- !sub_replace_utf8_sql4 -- +你abcd我界 + +-- !sub_replace_utf8_sql5 -- +\N + +-- !sub_replace_utf8_sql6 -- +大家世界 + +-- !sub_replace_utf8_sql7 -- +你大家114514 + +-- !sub_replace_utf8_sql8 -- +\N + +-- !sub_replace_utf8_sql9 -- +\N + +-- !sub_replace_utf8_sql10 -- +\N + +-- !sub_replace_utf8_sql1 -- +你a世界 + +-- !sub_replace_utf8_sql2 -- +你ab界 + +-- !sub_replace_utf8_sql3 -- +你ab + +-- !sub_replace_utf8_sql4 -- +你abcd我界 + +-- !sub_replace_utf8_sql5 -- +\N + +-- !sub_replace_utf8_sql6 -- +大家世界 + +-- !sub_replace_utf8_sql7 -- +你大家114514 + +-- !sub_replace_utf8_sql8 -- +\N + +-- !sub_replace_utf8_sql9 -- +\N + +-- !sub_replace_utf8_sql10 -- +\N + diff --git a/regression-test/data/nereids_p0/subquery/correlated_scalar_subquery.out 
b/regression-test/data/nereids_p0/subquery/correlated_scalar_subquery.out new file mode 100644 index 00000000000000..9414a5c9f61bca --- /dev/null +++ b/regression-test/data/nereids_p0/subquery/correlated_scalar_subquery.out @@ -0,0 +1,108 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !select_where1 -- +1 +1 + +-- !select_where2 -- +1 +1 + +-- !select_where3 -- + +-- !select_where4 -- +1 +1 +2 +2 +3 + +-- !select_where5 -- + +-- !select_where6 -- +2 +2 + +-- !select_where7 -- +\N +\N +2 +2 +3 +3 +20 +22 +24 + +-- !select_where8 -- +\N +\N +1 +1 +2 +2 +3 +3 +20 +22 +24 + +-- !select_where9 -- +\N +\N +1 +1 +2 +2 +3 +3 +20 +22 +24 + +-- !select_where10 -- +\N +\N +1 +1 +2 +2 +3 +3 +20 +22 +24 + +-- !select_where11 -- + +-- !select_project1 -- +\N \N +1 \N +2 \N +3 6 +20 \N +22 \N +24 \N + +-- !select_project2 -- +\N \N +1 \N +2 \N +3 6 +20 \N +22 \N +24 \N + +-- !select_join1 -- +3 4 + +-- !select_join2 -- +3 4 + +-- !select_having1 -- +1 +1 + +-- !select_having2 -- +1 +1 + diff --git a/regression-test/data/nereids_rules_p0/cte/test_cte_filter_pushdown.out b/regression-test/data/nereids_rules_p0/cte/test_cte_filter_pushdown.out index 0bbae0dc25f3a1..7dd6492aa12499 100644 --- a/regression-test/data/nereids_rules_p0/cte/test_cte_filter_pushdown.out +++ b/regression-test/data/nereids_rules_p0/cte/test_cte_filter_pushdown.out @@ -7,9 +7,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------filter((main.k1 = 1)) ----------PhysicalOlapScan[test] --PhysicalResultSink -----hashJoin[INNER_JOIN] hashCondition=((m1.k1 = m2.k1)) otherCondition=() +----hashJoin[INNER_JOIN] hashCondition=((m1.k1 = m2.k1)) otherCondition=() build RFs:RF0 k1->[k1] ------filter((temp.k1 = 1)) ---------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF0 ------filter((m2.k1 = 1)) --------PhysicalCteConsumer ( cteId=CTEId#0 ) @@ -21,9 +21,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalOlapScan[test] --PhysicalResultSink -----hashJoin[INNER_JOIN] hashCondition=((m1.k1 = m2.k1)) otherCondition=() +----hashJoin[INNER_JOIN] hashCondition=((m1.k1 = m2.k1)) otherCondition=() build RFs:RF0 k1->[k1] ------filter((temp.k1 = 1)) ---------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF0 ------filter((m2.k1 = 1)) --------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_rules_p0/eager_aggregate/basic.out b/regression-test/data/nereids_rules_p0/eager_aggregate/basic.out index 3e3986c75fc91c..ba18189efcad82 100644 --- a/regression-test/data/nereids_rules_p0/eager_aggregate/basic.out +++ b/regression-test/data/nereids_rules_p0/eager_aggregate/basic.out @@ -32,8 +32,8 @@ PhysicalResultSink --hashAgg[GLOBAL] ----hashAgg[LOCAL] ------hashJoin[INNER_JOIN] hashCondition=((a.device_id = b.device_id)) otherCondition=() ---------PhysicalOlapScan[shunt_log_com_dd_library] --------PhysicalOlapScan[com_dd_library] +--------PhysicalOlapScan[shunt_log_com_dd_library] -- !with_hint_1 -- PhysicalResultSink @@ -83,8 +83,8 @@ PhysicalResultSink --hashAgg[GLOBAL] ----hashAgg[LOCAL] ------hashJoin[INNER_JOIN] hashCondition=((a.device_id = b.device_id)) otherCondition=() ---------PhysicalOlapScan[shunt_log_com_dd_library] --------PhysicalOlapScan[com_dd_library] +--------PhysicalOlapScan[shunt_log_com_dd_library] Hint log: Used: diff --git a/regression-test/data/nereids_rules_p0/eager_aggregate/basic_one_side.out 
b/regression-test/data/nereids_rules_p0/eager_aggregate/basic_one_side.out index 49f1cc9617a090..aaf6afeca1e9f6 100644 --- a/regression-test/data/nereids_rules_p0/eager_aggregate/basic_one_side.out +++ b/regression-test/data/nereids_rules_p0/eager_aggregate/basic_one_side.out @@ -32,8 +32,8 @@ PhysicalResultSink --hashAgg[GLOBAL] ----hashAgg[LOCAL] ------hashJoin[INNER_JOIN] hashCondition=((a.device_id = b.device_id)) otherCondition=() ---------PhysicalOlapScan[shunt_log_com_dd_library_one_side] --------PhysicalOlapScan[com_dd_library_one_side] +--------PhysicalOlapScan[shunt_log_com_dd_library_one_side] -- !with_hint_1 -- PhysicalResultSink @@ -83,8 +83,8 @@ PhysicalResultSink --hashAgg[GLOBAL] ----hashAgg[LOCAL] ------hashJoin[INNER_JOIN] hashCondition=((a.device_id = b.device_id)) otherCondition=() ---------PhysicalOlapScan[shunt_log_com_dd_library_one_side] --------PhysicalOlapScan[com_dd_library_one_side] +--------PhysicalOlapScan[shunt_log_com_dd_library_one_side] Hint log: Used: diff --git a/regression-test/data/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.out b/regression-test/data/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.out index eff17e438e33c3..d92655e4e73e0b 100644 --- a/regression-test/data/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.out +++ b/regression-test/data/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.out @@ -11,17 +11,17 @@ PhysicalResultSink PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() -------PhysicalOlapScan[t] ------filter((t1.score > 10)) --------PhysicalOlapScan[t] +------PhysicalOlapScan[t] -- !full_outer_join -- PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] -----hashJoin[RIGHT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() -------PhysicalOlapScan[t] +----hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ------filter((t1.score > 10)) --------PhysicalOlapScan[t] +------PhysicalOlapScan[t] -- !full_outer_join -- PhysicalResultSink @@ -53,10 +53,10 @@ PhysicalResultSink PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t3.id)) otherCondition=() -------hashJoin[RIGHT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ---------PhysicalOlapScan[t] +------hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() --------filter((t1.score > 10)) ----------PhysicalOlapScan[t] +--------PhysicalOlapScan[t] ------PhysicalOlapScan[t] -- !multiple_left_outer_2 -- @@ -73,30 +73,30 @@ PhysicalResultSink PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t3.id)) otherCondition=() -------PhysicalOlapScan[t] ------hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ---------PhysicalOlapScan[t] --------filter((t1.score > 10)) ----------PhysicalOlapScan[t] +--------PhysicalOlapScan[t] +------PhysicalOlapScan[t] -- !multiple_right_outer_2 -- PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t3.id)) otherCondition=() -------PhysicalOlapScan[t] ------hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() --------PhysicalOlapScan[t] --------filter((t2.score > 10)) ----------PhysicalOlapScan[t] +------PhysicalOlapScan[t] -- 
!multiple_full_outer_1 -- PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t3.id)) otherCondition=() -------hashJoin[RIGHT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ---------PhysicalOlapScan[t] +------hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() --------filter((t1.score > 10)) ----------PhysicalOlapScan[t] +--------PhysicalOlapScan[t] ------PhysicalOlapScan[t] -- !multiple_full_outer_2 -- @@ -112,10 +112,10 @@ PhysicalResultSink -- !left_outer_join_non_null_assertion -- PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] -----hashJoin[RIGHT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() -------PhysicalOlapScan[t] +----hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ------filter(( not id IS NULL) and (t1.score > 5)) --------PhysicalOlapScan[t] +------PhysicalOlapScan[t] -- !right_outer_join_non_null_assertion -- PhysicalResultSink @@ -138,9 +138,9 @@ PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t2.id = t3.id)) otherCondition=() ------hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ---------PhysicalOlapScan[t] --------filter((t1.score > 5)) ----------PhysicalOlapScan[t] +--------PhysicalOlapScan[t] ------filter(( not score IS NULL)) --------PhysicalOlapScan[t] @@ -161,7 +161,7 @@ PhysicalResultSink ----PhysicalProject ------filter((count(id) > 1)) --------hashAgg[LOCAL] -----------hashJoin[RIGHT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() +----------hashJoin[LEFT_OUTER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() ------------PhysicalProject --------------PhysicalOlapScan[t] ------------PhysicalProject @@ -181,28 +181,28 @@ PhysicalResultSink PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() -------filter(( not name IS NULL)) ---------PhysicalOlapScan[t] ------filter((t1.score > 10)) --------PhysicalOlapScan[t] +------filter(( not name IS NULL)) +--------PhysicalOlapScan[t] -- !right_outer -- PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() -------filter(( not name IS NULL)) ---------PhysicalOlapScan[t] ------filter((t1.score > 10)) --------PhysicalOlapScan[t] +------filter(( not name IS NULL)) +--------PhysicalOlapScan[t] -- !full_outer -- PhysicalResultSink --PhysicalDistribute[DistributionSpecGather] ----hashJoin[INNER_JOIN colocated] hashCondition=((t1.id = t2.id)) otherCondition=() -------filter(( not name IS NULL)) ---------PhysicalOlapScan[t] ------filter((t1.score > 10)) --------PhysicalOlapScan[t] +------filter(( not name IS NULL)) +--------PhysicalOlapScan[t] -- !self_left_outer -- PhysicalResultSink diff --git a/regression-test/data/nereids_rules_p0/infer_predicate/infer_intersect_except.out b/regression-test/data/nereids_rules_p0/infer_predicate/infer_intersect_except.out index 783f83efe61753..2609ca5f4c9e23 100644 --- a/regression-test/data/nereids_rules_p0/infer_predicate/infer_intersect_except.out +++ b/regression-test/data/nereids_rules_p0/infer_predicate/infer_intersect_except.out @@ -58,10 +58,10 @@ PhysicalResultSink ----filter((infer_intersect_except1.a > 0)) ------PhysicalOlapScan[infer_intersect_except1] 
----PhysicalIntersect -------filter((infer_intersect_except3.a = 1) and (infer_intersect_except3.b = 'abc')) ---------PhysicalOlapScan[infer_intersect_except3] ------filter((infer_intersect_except2.b > 'ab')) --------PhysicalOlapScan[infer_intersect_except2] +------filter((infer_intersect_except3.a = 1) and (infer_intersect_except3.b = 'abc')) +--------PhysicalOlapScan[infer_intersect_except3] -- !except_and_intersect_except_predicate_to_right -- PhysicalResultSink diff --git a/regression-test/data/nereids_rules_p0/mv/union_all_compensate/union_all_compensate.out b/regression-test/data/nereids_rules_p0/mv/union_all_compensate/union_all_compensate.out new file mode 100644 index 00000000000000..7ff775063fbf75 --- /dev/null +++ b/regression-test/data/nereids_rules_p0/mv/union_all_compensate/union_all_compensate.out @@ -0,0 +1,201 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !query1_0_before -- +28 + +-- !query1_0_after -- +28 + +-- !query1_1_before -- +32 + +-- !query1_1_after -- +32 + +-- !query2_0_before -- +a 4 +b 28 + +-- !query2_0_after -- +a 2 +b 26 + +-- !query3_0_before -- +a 4 +b 28 + +-- !query3_0_after -- +a 4 +b 28 + +-- !query4_0_before -- +2024-09-12 8 +2024-09-13 8 +2024-09-14 8 +2024-09-15 8 + +-- !query4_0_after -- +2024-09-12 4 +2024-09-13 8 +2024-09-14 8 +2024-09-15 8 + +-- !query5_0_before -- +2024-09-12 8 +2024-09-13 8 +2024-09-14 8 +2024-09-15 8 + +-- !query5_0_after -- +2024-09-12 8 +2024-09-13 8 +2024-09-14 8 +2024-09-15 8 + +-- !query6_0_before -- +a 1 +a 1 +a 1 +a 1 +a 1 +a 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 + +-- !query6_0_after -- +a 1 +a 1 +a 1 +a 1 +a 1 +a 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 + +-- !query7_0_before -- +a 1 +a 1 +a 1 +a 1 +a 1 +a 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 + +-- !query7_0_after -- +a 1 +a 1 +a 1 +a 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 +b 1 + diff --git a/regression-test/data/nereids_rules_p0/normalize_window/normalize_window_nullable_agg_test.out b/regression-test/data/nereids_rules_p0/normalize_window/normalize_window_nullable_agg_test.out new file mode 100644 index 00000000000000..2df25bb0d3bed2 --- /dev/null +++ b/regression-test/data/nereids_rules_p0/normalize_window/normalize_window_nullable_agg_test.out @@ -0,0 +1,293 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !max -- +\N +2 +3 +3 +3 +5 +5 +5 +5 + +-- !min -- +\N +2 +2 +3 +2 +2 +3 +3 +3 + +-- !sum -- +\N +2 +5 +6 +5 +7 +8 +8 +8 + +-- !avg -- +\N +2.0 +2.5 +3.0 +2.5 +3.5 +4.0 +4.0 +4.0 + +-- !topn -- +\N +{"1":1} +{"2":1,"1":1} +{"3":1,"2":1} +{"4":1,"3":1} +{"5":1,"4":1} +{"6":1,"5":1} +{"7":1,"6":1} +{"8":1,"7":1} + +-- !topn_array -- +\N +[1] +[2, 1] +[3, 2] +[4, 3] +[5, 4] +[6, 5] +[7, 6] +[8, 7] + +-- !topn_weighted -- +\N +[1] +[2, 1] +[3, 2] +[4, 3] +[5, 4] +[6, 5] +[7, 6] +[8, 7] + +-- !max_by -- +\N +2 +3 +3 +2 +5 +3 +5 +3 + +-- !min_by -- +\N +2 +2 +3 +3 +2 +5 +3 +5 + +-- !avg_weighted -- +\N +2.0 +2.5 +3.0 +2.5 +3.5 +4.0 +4.0 +5.0 + +-- !variance -- +\N +0.0 +0.25 +0.0 +0.25 +2.25 +1.0 +1.0 +1.0 + +-- !variance_samp -- +\N +0.0 +0.5 +0.0 +0.5 +4.5 +2.0 +2.0 +2.0 + +-- !percentile -- +\N +2.0 +2.5 +3.0 +2.5 +3.5 +4.0 +4.0 +4.0 + +-- !percentile_approx -- +\N +2.0 +3.0 +3.0 +3.0 +5.0 +5.0 +5.0 +5.0 + +-- !stddev -- +\N +0.0 +0.5 +0.0 +0.5 +1.5 +1.0 +1.0 +1.0 + +-- !stddev_samp -- +\N +0.0 +0.7071067811865476 +0.0 +0.7071067811865476 +2.1213203435596424 +1.4142135623730951 +1.4142135623730951 +1.4142135623730951 + +-- !corr -- +\N +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 + +-- !covar -- +\N +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 + +-- !covar_samp -- +\N +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 + +-- !group_concat -- +\N +1 +1,1 +1,2 +2,2 +2,2 +2,2 +2,2 +2 + +-- !retention -- +\N +[1, 0] +[1, 1] +[1, 1] +[0, 0] +[0, 0] +[0, 0] +[0, 0] +[0, 0] + +-- !group_bit_and -- +\N +1 +1 +0 +2 +2 +2 +2 +2 + +-- !group_bit_or -- +\N +1 +1 +3 +2 +2 +2 +2 +2 + +-- !group_bit_xor -- +\N +1 +0 +3 +0 +0 +0 +0 +2 + +-- !group_bitmap_xor -- +\N +\N +\N +\N +\N +\N +\N +\N +\N + +-- !sum_foreach -- +\N +[1, 2] +[4, 4] +[4, 7] +[4, 7] +[8, 4] +[6, 4] +[2, 4] +[2, 25] + +-- !sequence_match -- +\N +false +false +false + diff --git a/regression-test/data/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.out b/regression-test/data/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.out index 9077ecb24b9b56..898621c7da765c 100644 --- a/regression-test/data/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.out +++ b/regression-test/data/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.out @@ -9,11 +9,11 @@ PhysicalResultSink -- !right_semi -- PhysicalResultSink ---hashJoin[LEFT_SEMI_JOIN] hashCondition=((t1.b = t2.b)) otherCondition=((((t2.a = 9) AND (t1.a = 1)) OR ((t1.a = 2) AND (t2.a = 8)))) -----filter(a IN (8, 9)) -------PhysicalOlapScan[extract_from_disjunction_in_join_t2] +--hashJoin[RIGHT_SEMI_JOIN] hashCondition=((t1.b = t2.b)) otherCondition=((((t2.a = 9) AND (t1.a = 1)) OR ((t1.a = 2) AND (t2.a = 8)))) ----filter(a IN (1, 2)) ------PhysicalOlapScan[extract_from_disjunction_in_join_t1] +----filter(a IN (8, 9)) +------PhysicalOlapScan[extract_from_disjunction_in_join_t2] -- !left -- PhysicalResultSink @@ -24,10 +24,10 @@ PhysicalResultSink -- !right -- PhysicalResultSink ---hashJoin[LEFT_OUTER_JOIN] hashCondition=((t1.b = t2.b)) otherCondition=((((t2.a = 9) AND (t1.a = 1)) OR ((t1.a = 2) AND (t2.a = 8))) and a IN (8, 9)) -----PhysicalOlapScan[extract_from_disjunction_in_join_t2] +--hashJoin[RIGHT_OUTER_JOIN] hashCondition=((t1.b = t2.b)) otherCondition=((((t2.a = 9) AND (t1.a = 1)) OR ((t1.a = 2) AND (t2.a = 8))) and a IN (8, 9)) ----filter(a IN (1, 2)) ------PhysicalOlapScan[extract_from_disjunction_in_join_t1] +----PhysicalOlapScan[extract_from_disjunction_in_join_t2] -- !left_anti -- 
PhysicalResultSink @@ -38,10 +38,10 @@ PhysicalResultSink -- !right_anti -- PhysicalResultSink ---hashJoin[LEFT_ANTI_JOIN] hashCondition=((t1.b = t2.b)) otherCondition=((((t2.a = 9) AND (t1.a = 1)) OR ((t1.a = 2) AND (t2.a = 8))) and a IN (8, 9)) -----PhysicalOlapScan[extract_from_disjunction_in_join_t2] +--hashJoin[RIGHT_ANTI_JOIN] hashCondition=((t1.b = t2.b)) otherCondition=((((t2.a = 9) AND (t1.a = 1)) OR ((t1.a = 2) AND (t2.a = 8))) and a IN (8, 9)) ----filter(a IN (1, 2)) ------PhysicalOlapScan[extract_from_disjunction_in_join_t1] +----PhysicalOlapScan[extract_from_disjunction_in_join_t2] -- !inner -- PhysicalResultSink diff --git a/regression-test/data/nereids_syntax_p0/mv/aggregate/agg_sync_mv.out b/regression-test/data/nereids_syntax_p0/mv/aggregate/agg_sync_mv.out index 848d637c916a93..f1e41365705d8d 100644 --- a/regression-test/data/nereids_syntax_p0/mv/aggregate/agg_sync_mv.out +++ b/regression-test/data/nereids_syntax_p0/mv/aggregate/agg_sync_mv.out @@ -1469,6 +1469,66 @@ 10 0 11 0 +-- !select_map_agg -- +\N {1:"string1"} +0 {1:"string1"} +1 {2:"string2"} +2 {3:"string3"} +3 {4:"string1"} +4 {5:"string2"} +5 {6:"string3"} +6 {7:"string1"} +7 {8:"string2"} +8 {9:"string3"} +9 {10:"string1"} +10 {11:"string2"} +11 {12:"string3"} + +-- !select_map_agg_mv -- +\N {1:"string1"} +0 {1:"string1"} +1 {2:"string2"} +2 {3:"string3"} +3 {4:"string1"} +4 {5:"string2"} +5 {6:"string3"} +6 {7:"string1"} +7 {8:"string2"} +8 {9:"string3"} +9 {10:"string1"} +10 {11:"string2"} +11 {12:"string3"} + +-- !select_array_agg -- +\N ["null", "null", "string1"] +0 ["string1", "string1"] +1 ["string2", "string2"] +2 ["string3", "string3"] +3 ["string1", "string1"] +4 ["string2", "string2"] +5 ["string3", "string3"] +6 ["string1", "string1"] +7 ["string2", "string2"] +8 ["string3", "string3"] +9 ["string1", "string1"] +10 ["string2", "string2"] +11 ["string3", "string3"] + +-- !select_array_agg_mv -- +\N ["null", "null", "string1"] +0 ["string1", "string1"] +1 ["string2", "string2"] +2 ["string3", "string3"] +3 ["string1", "string1"] +4 ["string2", "string2"] +5 ["string3", "string3"] +6 ["string1", "string1"] +7 ["string2", "string2"] +8 ["string3", "string3"] +9 ["string1", "string1"] +10 ["string2", "string2"] +11 ["string3", "string3"] + -- !select_retention -- \N [0, 0] 0 [0, 0] diff --git a/regression-test/data/nereids_syntax_p0/window_function.out b/regression-test/data/nereids_syntax_p0/window_function.out index 5e91a4682d1bd1..4ec92fc61ad46c 100644 --- a/regression-test/data/nereids_syntax_p0/window_function.out +++ b/regression-test/data/nereids_syntax_p0/window_function.out @@ -359,3 +359,205 @@ 1.0 1.5 +-- !sql -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N + diff --git a/regression-test/data/nereids_syntax_p1/mv/agg_mv_test.dat 
b/regression-test/data/nereids_syntax_p1/mv/agg_mv_test.dat new file mode 100644 index 00000000000000..1d1d05317a208e --- /dev/null +++ b/regression-test/data/nereids_syntax_p1/mv/agg_mv_test.dat @@ -0,0 +1,27 @@ +0;0;1;1;1;1;1;0.1;0.1;0.1;0.1;0.1;0.1;0.1;0.1;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-01;2012-03-01;2012-03-01 01:00:01;2012-03-01 01:00:01;2012-03-01 01:00:01;2012-03-01 01:00:01;[0];[0];[1];[1];[1];[1];[1];[0.1];[2012-03-01,2012-03-01];[2012-03-01 01:00:01];[2012-03-01,2012-03-01];[2012-03-01 01:00:01,2012-03-01 01:00:01,2012-03-01 01:00:01,2012-03-01 01:00:01];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.1,0.1];ST_Point(12.1, 32.1);ST_Point(12.1, 32.1);5;5;116.4274406433;39.9020987219;0;1;4.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0));{null:1,0:2,0:3};{null:1,1:2,2:3};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1.1:2};{null:1,1.1:2};{null:1,1.100:2};{null:1,"char1":2};{null:1,"vchar1":2};{null:1,"str1":2};{null:1,2012-03-01:2};{null:1,2012-03-01:2};{1:null,2:0,3:0};{1:null,2:1,3:2};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1.1};{1:null,2:1.1};{1:null,2:1.100};{1:null,2:"char1"};{1:null,2:"vchar1"};{1:null,2:"str1"};{1:null,2:2012-03-01};{1:null,2:2012-03-01};[];{"id":1} +1;0;2;2;2;2;2;0.2;0.2;0.2;0.2;0.2;0.2;0.2;0.2;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-02;2012-03-02;2012-03-02 02:01:02;2012-03-02 02:01:02;2012-03-02 02:01:02;2012-03-02 02:01:02;[0];[0];[2];[2];[2];[2];[2];[0.2];[2012-03-02,2012-03-02];[2012-03-02 02:01:02];[2012-03-02,2012-03-02];[2012-03-02 02:01:02,2012-03-02 02:01:02,2012-03-02 02:01:02,2012-03-02 02:01:02];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[0.2,0.2];ST_Point(12.2, 32.2);ST_Point(12.2, 32.2);126.35620117;-39.939093;56.4274406433;66.9020987219;0;2;5.3;LINESTRING (1 1, 2 2);POLYGON ((1 1, 4 1, 4 4, 1 4, 1 1));{null:1,0:2,0:3};{null:1,2:2,1:3};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2.2:2};{null:1,2.2:2};{null:1,2.200:2};{null:1,"char2":2};{null:1,"vchar2":2};{null:1,"str2":2};{null:1,2012-03-02:2};{null:1,2012-03-02:2};{1:null,2:0,3:0};{1:null,2:2,3:1};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2.2};{1:null,2:2.2};{1:null,2:2.200};{1:null,2:"char2"};{1:null,2:"vchar2"};{1:null,2:"str2"};{1:null,2:2012-03-02};{1:null,2:2012-03-02};[];{"id":1} +2;0;3;3;3;3;3;0.3;0.3;0.3;0.3;0.3;0.3;0.3;0.3;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-03;2012-03-03;2012-03-03 03:02:03;2012-03-03 03:02:03;2012-03-03 03:02:03;2012-03-03 03:02:03;[0];[0];[3];[3];[3];[3];[3];[0.3];[2012-03-03,2012-03-03];[2012-03-03 03:02:03];[2012-03-03,2012-03-03];[2012-03-03 03:02:03,2012-03-03 03:02:03,2012-03-03 03:02:03,2012-03-03 03:02:03];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[0.3,0.3];ST_Point(12.3, 32.3);ST_Point(12.3, 32.3);16.35620117;19.939093;76.4274406433;46.9020987219;0;3;7.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 12 0, 10 16, 1 10, 0 
0));{null:1,0:2,0:3};{null:1,3:2,0:3};{null:1,3:2};{null:1,3:2};{null:1,3:2};{null:1,3:2};{null:1,3.3:2};{null:1,3.3:2};{null:1,3.300:2};{null:1,"char3":2};{null:1,"vchar3":2};{null:1,"str3":2};{null:1,2012-03-03:2};{null:1,2012-03-03:2};{1:null,2:0,3:0};{1:null,2:3,3:0};{1:null,2:3};{1:null,2:3};{1:null,2:3};{1:null,2:3};{1:null,2:3.3};{1:null,2:3.3};{1:null,2:3.300};{1:null,2:"char3"};{1:null,2:"vchar3"};{1:null,2:"str3"};{1:null,2:2012-03-03};{1:null,2:2012-03-03};[];{"id":1} +3;0;4;4;4;4;4;0.4;0.4;0.4;0.4;0.4;0.4;0.4;0.4;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-04;2012-03-04;2012-03-04 04:03:04;2012-03-04 04:03:04;2012-03-04 04:03:04;2012-03-04 04:03:04;[0];[0];[4];[4];[4];[4];[4];[0.4];[2012-03-04,2012-03-04];[2012-03-04 04:03:04];[2012-03-04,2012-03-04];[2012-03-04 04:03:04,2012-03-04 04:03:04,2012-03-04 04:03:04,2012-03-04 04:03:04];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.4,0.4];ST_Point(12.4, 32.4);ST_Point(12.4, 32.4);-46.35620117;39.939093;23.4274406433;-26.9020987219;0;4;3.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 11 4, 42 44, 4 18, 1 1));{null:1,0:2,1:3};{null:1,4:2,9:3};{null:1,4:2};{null:1,4:2};{null:1,4:2};{null:1,4:2};{null:1,4.4:2};{null:1,4.4:2};{null:1,4.400:2};{null:1,"char4":2};{null:1,"vchar4":2};{null:1,"str4":2};{null:1,2012-03-04:2};{null:1,2012-03-04:2};{1:null,2:0,3:1};{1:null,2:4,3:9};{1:null,2:4};{1:null,2:4};{1:null,2:4};{1:null,2:4};{1:null,2:4.4};{1:null,2:4.4};{1:null,2:4.400};{1:null,2:"char4"};{1:null,2:"vchar4"};{1:null,2:"str4"};{1:null,2:2012-03-04};{1:null,2:2012-03-04};[];{"id":1} +4;0;5;5;5;5;5;0.5;0.5;0.5;0.5;0.5;0.5;0.5;0.5;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-05;2012-03-05;2012-03-05 05:04:05;2012-03-05 05:04:05;2012-03-05 05:04:05;2012-03-05 05:04:05;[0];[0];[5];[5];[5];[5];[5];[0.5];[2012-03-05,2012-03-05];[2012-03-05 05:04:05];[2012-03-05,2012-03-05];[2012-03-05 05:04:05,2012-03-05 05:04:05,2012-03-05 05:04:05,2012-03-05 05:04:05];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[0.5,0.5];ST_Point(12.5, 32.5);ST_Point(12.5, 32.5);43.35620117;35.939093;35.4274406433;35.9020987219;0;5;2.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 16 1, 16 16, 1 16, 1 1));{null:1,0:2,1:3};{null:1,5:2,8:3};{null:1,5:2};{null:1,5:2};{null:1,5:2};{null:1,5:2};{null:1,5.5:2};{null:1,5.5:2};{null:1,5.500:2};{null:1,"char5":2};{null:1,"vchar5":2};{null:1,"str5":2};{null:1,2012-03-05:2};{null:1,2012-03-05:2};{1:null,2:0,3:1};{1:null,2:5,3:8};{1:null,2:5};{1:null,2:5};{1:null,2:5};{1:null,2:5};{1:null,2:5.5};{1:null,2:5.5};{1:null,2:5.500};{1:null,2:"char5"};{1:null,2:"vchar5"};{1:null,2:"str5"};{1:null,2:2012-03-05};{1:null,2:2012-03-05};[];{"id":1} +5;0;6;6;6;6;6;0.6;0.6;0.6;0.6;0.6;0.6;0.6;0.6;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-06;2012-03-06;2012-03-06 06:05:06;2012-03-06 06:05:06;2012-03-06 06:05:06;2012-03-06 06:05:06;[0];[0];[6];[6];[6];[6];[6];[0.6];[2012-03-06,2012-03-06];[2012-03-06 06:05:06];[2012-03-06,2012-03-06];[2012-03-06 06:05:06,2012-03-06 06:05:06,2012-03-06 06:05:06,2012-03-06 06:05:06];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[0.6,0.6];ST_Point(12.6, 32.6);ST_Point(12.6, 32.6);47.35620117;26.939093;47.4274406433;75.9020987219;0;6;33.321;LINESTRING (1 1, 2 2);POLYGON ((4 4, 45 4, 45 45, 4 45, 4 
4));{null:1,0:2,1:3};{null:1,6:2,7:3};{null:1,6:2};{null:1,6:2};{null:1,6:2};{null:1,6:2};{null:1,6.6:2};{null:1,6.6:2};{null:1,6.600:2};{null:1,"char6":2};{null:1,"vchar6":2};{null:1,"str6":2};{null:1,2012-03-06:2};{null:1,2012-03-06:2};{1:null,2:0,3:1};{1:null,2:6,3:7};{1:null,2:6};{1:null,2:6};{1:null,2:6};{1:null,2:6};{1:null,2:6.6};{1:null,2:6.6};{1:null,2:6.600};{1:null,2:"char6"};{1:null,2:"vchar6"};{1:null,2:"str6"};{1:null,2:2012-03-06};{1:null,2:2012-03-06};[];{"id":1} +6;0;7;7;7;7;7;0.7;0.7;0.7;0.7;0.7;0.7;0.7;0.7;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-07;2012-03-07;2012-03-07 07:06:07;2012-03-07 07:06:07;2012-03-07 07:06:07;2012-03-07 07:06:07;[0];[0];[7];[7];[7];[7];[7];[0.7];[2012-03-07,2012-03-07];[2012-03-07 07:06:07];[2012-03-07,2012-03-07];[2012-03-07 07:06:07,2012-03-07 07:06:07,2012-03-07 07:06:07,2012-03-07 07:06:07];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.7,0.7];ST_Point(12.7, 32.7);ST_Point(12.7, 32.7);98.35620117;36.939093;57.4274406433;57.9020987219;0;7;45.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 34 1, 34 34, 1 34, 1 1));{null:1,1:2,0:3};{null:1,7:2,6:3};{null:1,7:2};{null:1,7:2};{null:1,7:2};{null:1,7:2};{null:1,7.7:2};{null:1,7.7:2};{null:1,7.700:2};{null:1,"char7":2};{null:1,"vchar7":2};{null:1,"str7":2};{null:1,2012-03-07:2};{null:1,2012-03-07:2};{1:null,2:1,3:0};{1:null,2:7,3:6};{1:null,2:7};{1:null,2:7};{1:null,2:7};{1:null,2:7};{1:null,2:7.7};{1:null,2:7.7};{1:null,2:7.700};{1:null,2:"char7"};{1:null,2:"vchar7"};{1:null,2:"str7"};{1:null,2:2012-03-07};{1:null,2:2012-03-07};[];{"id":1} +7;1;8;8;8;8;8;0.8;0.8;0.8;0.8;0.8;0.8;0.8;0.8;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-08;2012-03-08;2012-03-08 08:07:08;2012-03-08 08:07:08;2012-03-08 08:07:08;2012-03-08 08:07:08;[1];[1];[8];[8];[8];[8];[8];[0.8];[2012-03-08,2012-03-08];[2012-03-08 08:07:08];[2012-03-08,2012-03-08];[2012-03-08 08:07:08,2012-03-08 08:07:08,2012-03-08 08:07:08,2012-03-08 08:07:08];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[0.8,0.8];ST_Point(11.1, 31.1);ST_Point(11.1, 31.1);-74.35620117;79.939093;59.4274406433;85.9020987219;0;8;66.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 38 4, 38 37, 4 1, 0 0));{null:1,1:2,0:3};{null:1,8:2,5:3};{null:1,8:2};{null:1,8:2};{null:1,8:2};{null:1,8:2};{null:1,8.8:2};{null:1,8.8:2};{null:1,8.800:2};{null:1,"char8":2};{null:1,"vchar8":2};{null:1,"str8":2};{null:1,2012-03-08:2};{null:1,2012-03-08:2};{1:null,2:1,3:0};{1:null,2:8,3:5};{1:null,2:8};{1:null,2:8};{1:null,2:8};{1:null,2:8};{1:null,2:8.8};{1:null,2:8.8};{1:null,2:8.800};{1:null,2:"char8"};{1:null,2:"vchar8"};{1:null,2:"str8"};{1:null,2:2012-03-08};{1:null,2:2012-03-08};[];{"id":1} +8;1;9;9;9;9;9;0.9;0.9;0.9;0.9;0.9;0.9;0.9;0.9;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-09;2012-03-09;2012-03-09 09:08:09;2012-03-09 09:08:09;2012-03-09 09:08:09;2012-03-09 09:08:09;[1];[1];[9];[9];[9];[9];[9];[0.9];[2012-03-09,2012-03-09];[2012-03-09 09:08:09];[2012-03-09,2012-03-09];[2012-03-09 09:08:09,2012-03-09 09:08:09,2012-03-09 09:08:09,2012-03-09 09:08:09];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[0.9,0.9];ST_Point(12.1, 32.1);ST_Point(12.1, 32.1);90.35620117;47.939093;89.4274406433;58.9020987219;0;9;88.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 56 0, 67 89, 4 32, 1 
1));{null:1,1:2,0:3};{null:1,9:2,4:3};{null:1,9:2};{null:1,9:2};{null:1,9:2};{null:1,9:2};{null:1,9.9:2};{null:1,9.9:2};{null:1,9.900:2};{null:1,"char9":2};{null:1,"vchar9":2};{null:1,"str9":2};{null:1,2012-03-09:2};{null:1,2012-03-09:2};{1:null,2:1,3:0};{1:null,2:9,3:4};{1:null,2:9};{1:null,2:9};{1:null,2:9};{1:null,2:9};{1:null,2:9.9};{1:null,2:9.9};{1:null,2:9.900};{1:null,2:"char9"};{1:null,2:"vchar9"};{1:null,2:"str9"};{1:null,2:2012-03-09};{1:null,2:2012-03-09};[];{"id":1} +9;1;10;10;10;10;10;1;1;1;1;1;1;1;1;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-10;2012-03-10;2012-03-10 10:09:10;2012-03-10 10:09:10;2012-03-10 10:09:10;2012-03-10 10:09:10;[1];[1];[10];[10];[10];[10];[10];[1];[2012-03-10,2012-03-10];[2012-03-10 10:09:10];[2012-03-10,2012-03-10];[2012-03-10 10:09:10,2012-03-10 10:09:10,2012-03-10 10:09:10,2012-03-10 10:09:10];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[1,1];ST_Point(19.1, 39.1);ST_Point(19.1, 39.1);90.35620117;49.939093;89.4274406433;58.9020987219;0;10;76.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 5 0, 10 4, 4 3, 1 1));{null:1,1:2,1:3};{null:1,0:2,3:3};{null:1,0:2};{null:1,0:2};{null:1,0:2};{null:1,0:2};{null:1,0.0:2};{null:1,0.0:2};{null:1,0.000:2};{null:1,"char0":2};{null:1,"vchar0":2};{null:1,"str0":2};{null:1,2012-03-10:2};{null:1,2012-03-10:2};{1:null,2:1,3:1};{1:null,2:0,3:3};{1:null,2:0};{1:null,2:0};{1:null,2:0};{1:null,2:0};{1:null,2:0.0};{1:null,2:0.0};{1:null,2:0.000};{1:null,2:"char0"};{1:null,2:"vchar0"};{1:null,2:"str0"};{1:null,2:2012-03-10};{1:null,2:2012-03-10};[];{"id":1} +10;1;11;11;11;11;11;1.1;1.1;1.1;1.1;1.1;1.1;1.1;1.1;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-11;2012-03-11;2012-03-11 11:10:11;2012-03-11 11:10:11;2012-03-11 11:10:11;2012-03-11 11:10:11;[1];[1];[11];[11];[11];[11];[11];[1.1];[2012-03-11,2012-03-11];[2012-03-11 11:10:11];[2012-03-11,2012-03-11];[2012-03-11 11:10:11,2012-03-11 11:10:11,2012-03-11 11:10:11,2012-03-11 11:10:11];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[1.1,1.1];ST_Point(17.1, 37.1);ST_Point(17.1, 37.1);90.35620117;59.939093;89.4274406433;58.9020987219;0;11;75.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 8 0, 48 34, 4 10, 1 1));{null:1,1:2,1:3};{null:1,1:2,2:3};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1.1:2};{null:1,1.1:2};{null:1,1.100:2};{null:1,"char1":2};{null:1,"vchar1":2};{null:1,"str1":2};{null:1,2012-03-11:2};{null:1,2012-03-11:2};{1:null,2:1,3:1};{1:null,2:1,3:2};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1.1};{1:null,2:1.1};{1:null,2:1.100};{1:null,2:"char1"};{1:null,2:"vchar1"};{1:null,2:"str1"};{1:null,2:2012-03-11};{1:null,2:2012-03-11};[];{"id":1} +11;1;12;12;12;12;12;1.2;1.2;1.2;1.2;1.2;1.2;1.2;1.2;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-12;2012-03-12;2012-03-12 12:11:12;2012-03-12 12:11:12;2012-03-12 12:11:12;2012-03-12 12:11:12;[1];[1];[12];[12];[12];[12];[12];[1.2];[2012-03-12,2012-03-12];[2012-03-12 12:11:12];[2012-03-12,2012-03-12];[2012-03-12 12:11:12,2012-03-12 12:11:12,2012-03-12 12:11:12,2012-03-12 12:11:12];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[1.2,1.2];ST_Point(22.1, 22.1);ST_Point(22.1, 22.1);90.35620117;39.939093;89.4274406433;58.9020987219;0;12;100.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 9 4, 10 4, 4 1, 1 
1));{null:1,1:2,1:3};{null:1,2:2,1:3};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2.2:2};{null:1,2.2:2};{null:1,2.200:2};{null:1,"char2":2};{null:1,"vchar2":2};{null:1,"str2":2};{null:1,2012-03-12:2};{null:1,2012-03-12:2};{1:null,2:1,3:1};{1:null,2:2,3:1};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2.2};{1:null,2:2.2};{1:null,2:2.200};{1:null,2:"char2"};{1:null,2:"vchar2"};{1:null,2:"str2"};{1:null,2:2012-03-12};{1:null,2:2012-03-12};[];{"id":1} +null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null +0;0;1;1;1;1;1;0.1;0.1;0.1;0.1;0.1;0.1;0.1;0.1;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-01;2012-03-01;2012-03-01 01:00:01;2012-03-01 01:00:01;2012-03-01 01:00:01;2012-03-01 01:00:01;[0];[0];[1];[1];[1];[1];[1];[0.1];[2012-03-01,2012-03-01];[2012-03-01 01:00:01];[2012-03-01,2012-03-01];[2012-03-01 01:00:01,2012-03-01 01:00:01,2012-03-01 01:00:01,2012-03-01 01:00:01];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.1,0.1];ST_Point(12.1, 32.1);ST_Point(12.1, 32.1);5;5;116.4274406433;39.9020987219;0;1;4.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0));{null:1,0:2,0:3};{null:1,1:2,2:3};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1.1:2};{null:1,1.1:2};{null:1,1.100:2};{null:1,"char1":2};{null:1,"vchar1":2};{null:1,"str1":2};{null:1,2012-03-01:2};{null:1,2012-03-01:2};{1:null,2:0,3:0};{1:null,2:1,3:2};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1.1};{1:null,2:1.1};{1:null,2:1.100};{1:null,2:"char1"};{1:null,2:"vchar1"};{1:null,2:"str1"};{1:null,2:2012-03-01};{1:null,2:2012-03-01};[];{"id":1} +1;0;2;2;2;2;2;0.2;0.2;0.2;0.2;0.2;0.2;0.2;0.2;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-02;2012-03-02;2012-03-02 02:01:02;2012-03-02 02:01:02;2012-03-02 02:01:02;2012-03-02 02:01:02;[0];[0];[2];[2];[2];[2];[2];[0.2];[2012-03-02,2012-03-02];[2012-03-02 02:01:02];[2012-03-02,2012-03-02];[2012-03-02 02:01:02,2012-03-02 02:01:02,2012-03-02 02:01:02,2012-03-02 02:01:02];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[0.2,0.2];ST_Point(12.2, 32.2);ST_Point(12.2, 32.2);126.35620117;-39.939093;56.4274406433;66.9020987219;0;2;5.3;LINESTRING (1 1, 2 2);POLYGON ((1 1, 4 1, 4 4, 1 4, 1 1));{null:1,0:2,0:3};{null:1,2:2,1:3};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2.2:2};{null:1,2.2:2};{null:1,2.200:2};{null:1,"char2":2};{null:1,"vchar2":2};{null:1,"str2":2};{null:1,2012-03-02:2};{null:1,2012-03-02:2};{1:null,2:0,3:0};{1:null,2:2,3:1};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2.2};{1:null,2:2.2};{1:null,2:2.200};{1:null,2:"char2"};{1:null,2:"vchar2"};{1:null,2:"str2"};{1:null,2:2012-03-02};{1:null,2:2012-03-02};[];{"id":1} +2;0;3;3;3;3;3;0.3;0.3;0.3;0.3;0.3;0.3;0.3;0.3;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-03;2012-03-03;2012-03-03 03:02:03;2012-03-03 03:02:03;2012-03-03 03:02:03;2012-03-03 03:02:03;[0];[0];[3];[3];[3];[3];[3];[0.3];[2012-03-03,2012-03-03];[2012-03-03 
03:02:03];[2012-03-03,2012-03-03];[2012-03-03 03:02:03,2012-03-03 03:02:03,2012-03-03 03:02:03,2012-03-03 03:02:03];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[0.3,0.3];ST_Point(12.3, 32.3);ST_Point(12.3, 32.3);16.35620117;19.939093;76.4274406433;46.9020987219;0;3;7.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 12 0, 10 16, 1 10, 0 0));{null:1,0:2,0:3};{null:1,3:2,0:3};{null:1,3:2};{null:1,3:2};{null:1,3:2};{null:1,3:2};{null:1,3.3:2};{null:1,3.3:2};{null:1,3.300:2};{null:1,"char3":2};{null:1,"vchar3":2};{null:1,"str3":2};{null:1,2012-03-03:2};{null:1,2012-03-03:2};{1:null,2:0,3:0};{1:null,2:3,3:0};{1:null,2:3};{1:null,2:3};{1:null,2:3};{1:null,2:3};{1:null,2:3.3};{1:null,2:3.3};{1:null,2:3.300};{1:null,2:"char3"};{1:null,2:"vchar3"};{1:null,2:"str3"};{1:null,2:2012-03-03};{1:null,2:2012-03-03};[];{"id":1} +3;0;4;4;4;4;4;0.4;0.4;0.4;0.4;0.4;0.4;0.4;0.4;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-04;2012-03-04;2012-03-04 04:03:04;2012-03-04 04:03:04;2012-03-04 04:03:04;2012-03-04 04:03:04;[0];[0];[4];[4];[4];[4];[4];[0.4];[2012-03-04,2012-03-04];[2012-03-04 04:03:04];[2012-03-04,2012-03-04];[2012-03-04 04:03:04,2012-03-04 04:03:04,2012-03-04 04:03:04,2012-03-04 04:03:04];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.4,0.4];ST_Point(12.4, 32.4);ST_Point(12.4, 32.4);-46.35620117;39.939093;23.4274406433;-26.9020987219;0;4;3.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 11 4, 42 44, 4 18, 1 1));{null:1,0:2,1:3};{null:1,4:2,9:3};{null:1,4:2};{null:1,4:2};{null:1,4:2};{null:1,4:2};{null:1,4.4:2};{null:1,4.4:2};{null:1,4.400:2};{null:1,"char4":2};{null:1,"vchar4":2};{null:1,"str4":2};{null:1,2012-03-04:2};{null:1,2012-03-04:2};{1:null,2:0,3:1};{1:null,2:4,3:9};{1:null,2:4};{1:null,2:4};{1:null,2:4};{1:null,2:4};{1:null,2:4.4};{1:null,2:4.4};{1:null,2:4.400};{1:null,2:"char4"};{1:null,2:"vchar4"};{1:null,2:"str4"};{1:null,2:2012-03-04};{1:null,2:2012-03-04};[];{"id":1} +4;0;5;5;5;5;5;0.5;0.5;0.5;0.5;0.5;0.5;0.5;0.5;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-05;2012-03-05;2012-03-05 05:04:05;2012-03-05 05:04:05;2012-03-05 05:04:05;2012-03-05 05:04:05;[0];[0];[5];[5];[5];[5];[5];[0.5];[2012-03-05,2012-03-05];[2012-03-05 05:04:05];[2012-03-05,2012-03-05];[2012-03-05 05:04:05,2012-03-05 05:04:05,2012-03-05 05:04:05,2012-03-05 05:04:05];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[0.5,0.5];ST_Point(12.5, 32.5);ST_Point(12.5, 32.5);43.35620117;35.939093;35.4274406433;35.9020987219;0;5;2.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 16 1, 16 16, 1 16, 1 1));{null:1,0:2,1:3};{null:1,5:2,8:3};{null:1,5:2};{null:1,5:2};{null:1,5:2};{null:1,5:2};{null:1,5.5:2};{null:1,5.5:2};{null:1,5.500:2};{null:1,"char5":2};{null:1,"vchar5":2};{null:1,"str5":2};{null:1,2012-03-05:2};{null:1,2012-03-05:2};{1:null,2:0,3:1};{1:null,2:5,3:8};{1:null,2:5};{1:null,2:5};{1:null,2:5};{1:null,2:5};{1:null,2:5.5};{1:null,2:5.5};{1:null,2:5.500};{1:null,2:"char5"};{1:null,2:"vchar5"};{1:null,2:"str5"};{1:null,2:2012-03-05};{1:null,2:2012-03-05};[];{"id":1} +5;0;6;6;6;6;6;0.6;0.6;0.6;0.6;0.6;0.6;0.6;0.6;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-06;2012-03-06;2012-03-06 06:05:06;2012-03-06 06:05:06;2012-03-06 06:05:06;2012-03-06 06:05:06;[0];[0];[6];[6];[6];[6];[6];[0.6];[2012-03-06,2012-03-06];[2012-03-06 
06:05:06];[2012-03-06,2012-03-06];[2012-03-06 06:05:06,2012-03-06 06:05:06,2012-03-06 06:05:06,2012-03-06 06:05:06];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[0.6,0.6];ST_Point(12.6, 32.6);ST_Point(12.6, 32.6);47.35620117;26.939093;47.4274406433;75.9020987219;0;6;33.321;LINESTRING (1 1, 2 2);POLYGON ((4 4, 45 4, 45 45, 4 45, 4 4));{null:1,0:2,1:3};{null:1,6:2,7:3};{null:1,6:2};{null:1,6:2};{null:1,6:2};{null:1,6:2};{null:1,6.6:2};{null:1,6.6:2};{null:1,6.600:2};{null:1,"char6":2};{null:1,"vchar6":2};{null:1,"str6":2};{null:1,2012-03-06:2};{null:1,2012-03-06:2};{1:null,2:0,3:1};{1:null,2:6,3:7};{1:null,2:6};{1:null,2:6};{1:null,2:6};{1:null,2:6};{1:null,2:6.6};{1:null,2:6.6};{1:null,2:6.600};{1:null,2:"char6"};{1:null,2:"vchar6"};{1:null,2:"str6"};{1:null,2:2012-03-06};{1:null,2:2012-03-06};[];{"id":1} +6;0;7;7;7;7;7;0.7;0.7;0.7;0.7;0.7;0.7;0.7;0.7;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-07;2012-03-07;2012-03-07 07:06:07;2012-03-07 07:06:07;2012-03-07 07:06:07;2012-03-07 07:06:07;[0];[0];[7];[7];[7];[7];[7];[0.7];[2012-03-07,2012-03-07];[2012-03-07 07:06:07];[2012-03-07,2012-03-07];[2012-03-07 07:06:07,2012-03-07 07:06:07,2012-03-07 07:06:07,2012-03-07 07:06:07];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.7,0.7];ST_Point(12.7, 32.7);ST_Point(12.7, 32.7);98.35620117;36.939093;57.4274406433;57.9020987219;0;7;45.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 34 1, 34 34, 1 34, 1 1));{null:1,1:2,0:3};{null:1,7:2,6:3};{null:1,7:2};{null:1,7:2};{null:1,7:2};{null:1,7:2};{null:1,7.7:2};{null:1,7.7:2};{null:1,7.700:2};{null:1,"char7":2};{null:1,"vchar7":2};{null:1,"str7":2};{null:1,2012-03-07:2};{null:1,2012-03-07:2};{1:null,2:1,3:0};{1:null,2:7,3:6};{1:null,2:7};{1:null,2:7};{1:null,2:7};{1:null,2:7};{1:null,2:7.7};{1:null,2:7.7};{1:null,2:7.700};{1:null,2:"char7"};{1:null,2:"vchar7"};{1:null,2:"str7"};{1:null,2:2012-03-07};{1:null,2:2012-03-07};[];{"id":1} +7;1;8;8;8;8;8;0.8;0.8;0.8;0.8;0.8;0.8;0.8;0.8;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-08;2012-03-08;2012-03-08 08:07:08;2012-03-08 08:07:08;2012-03-08 08:07:08;2012-03-08 08:07:08;[1];[1];[8];[8];[8];[8];[8];[0.8];[2012-03-08,2012-03-08];[2012-03-08 08:07:08];[2012-03-08,2012-03-08];[2012-03-08 08:07:08,2012-03-08 08:07:08,2012-03-08 08:07:08,2012-03-08 08:07:08];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[0.8,0.8];ST_Point(11.1, 31.1);ST_Point(11.1, 31.1);-74.35620117;79.939093;59.4274406433;85.9020987219;0;8;66.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 38 4, 38 37, 4 1, 0 0));{null:1,1:2,0:3};{null:1,8:2,5:3};{null:1,8:2};{null:1,8:2};{null:1,8:2};{null:1,8:2};{null:1,8.8:2};{null:1,8.8:2};{null:1,8.800:2};{null:1,"char8":2};{null:1,"vchar8":2};{null:1,"str8":2};{null:1,2012-03-08:2};{null:1,2012-03-08:2};{1:null,2:1,3:0};{1:null,2:8,3:5};{1:null,2:8};{1:null,2:8};{1:null,2:8};{1:null,2:8};{1:null,2:8.8};{1:null,2:8.8};{1:null,2:8.800};{1:null,2:"char8"};{1:null,2:"vchar8"};{1:null,2:"str8"};{1:null,2:2012-03-08};{1:null,2:2012-03-08};[];{"id":1} +8;1;9;9;9;9;9;0.9;0.9;0.9;0.9;0.9;0.9;0.9;0.9;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-09;2012-03-09;2012-03-09 09:08:09;2012-03-09 09:08:09;2012-03-09 09:08:09;2012-03-09 09:08:09;[1];[1];[9];[9];[9];[9];[9];[0.9];[2012-03-09,2012-03-09];[2012-03-09 
09:08:09];[2012-03-09,2012-03-09];[2012-03-09 09:08:09,2012-03-09 09:08:09,2012-03-09 09:08:09,2012-03-09 09:08:09];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[0.9,0.9];ST_Point(12.1, 32.1);ST_Point(12.1, 32.1);90.35620117;47.939093;89.4274406433;58.9020987219;0;9;88.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 56 0, 67 89, 4 32, 1 1));{null:1,1:2,0:3};{null:1,9:2,4:3};{null:1,9:2};{null:1,9:2};{null:1,9:2};{null:1,9:2};{null:1,9.9:2};{null:1,9.9:2};{null:1,9.900:2};{null:1,"char9":2};{null:1,"vchar9":2};{null:1,"str9":2};{null:1,2012-03-09:2};{null:1,2012-03-09:2};{1:null,2:1,3:0};{1:null,2:9,3:4};{1:null,2:9};{1:null,2:9};{1:null,2:9};{1:null,2:9};{1:null,2:9.9};{1:null,2:9.9};{1:null,2:9.900};{1:null,2:"char9"};{1:null,2:"vchar9"};{1:null,2:"str9"};{1:null,2:2012-03-09};{1:null,2:2012-03-09};[];{"id":1} +9;1;10;10;10;10;10;1;1;1;1;1;1;1;1;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-10;2012-03-10;2012-03-10 10:09:10;2012-03-10 10:09:10;2012-03-10 10:09:10;2012-03-10 10:09:10;[1];[1];[10];[10];[10];[10];[10];[1];[2012-03-10,2012-03-10];[2012-03-10 10:09:10];[2012-03-10,2012-03-10];[2012-03-10 10:09:10,2012-03-10 10:09:10,2012-03-10 10:09:10,2012-03-10 10:09:10];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[1,1];ST_Point(19.1, 39.1);ST_Point(19.1, 39.1);90.35620117;49.939093;89.4274406433;58.9020987219;0;10;76.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 5 0, 10 4, 4 3, 1 1));{null:1,1:2,1:3};{null:1,0:2,3:3};{null:1,0:2};{null:1,0:2};{null:1,0:2};{null:1,0:2};{null:1,0.0:2};{null:1,0.0:2};{null:1,0.000:2};{null:1,"char0":2};{null:1,"vchar0":2};{null:1,"str0":2};{null:1,2012-03-10:2};{null:1,2012-03-10:2};{1:null,2:1,3:1};{1:null,2:0,3:3};{1:null,2:0};{1:null,2:0};{1:null,2:0};{1:null,2:0};{1:null,2:0.0};{1:null,2:0.0};{1:null,2:0.000};{1:null,2:"char0"};{1:null,2:"vchar0"};{1:null,2:"str0"};{1:null,2:2012-03-10};{1:null,2:2012-03-10};[];{"id":1} +10;1;11;11;11;11;11;1.1;1.1;1.1;1.1;1.1;1.1;1.1;1.1;char12;char22;char32;varchar12;varchar22;varchar32;string2;2012-03-11;2012-03-11;2012-03-11 11:10:11;2012-03-11 11:10:11;2012-03-11 11:10:11;2012-03-11 11:10:11;[1];[1];[11];[11];[11];[11];[11];[1.1];[2012-03-11,2012-03-11];[2012-03-11 11:10:11];[2012-03-11,2012-03-11];[2012-03-11 11:10:11,2012-03-11 11:10:11,2012-03-11 11:10:11,2012-03-11 11:10:11];[char12,char22,char32];[char12,char22,char32,varchar12,varchar22,varchar32];[char22,char32,varchar12,varchar22,varchar32,string2];[1.1,1.1];ST_Point(17.1, 37.1);ST_Point(17.1, 37.1);90.35620117;59.939093;89.4274406433;58.9020987219;0;11;75.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 8 0, 48 34, 4 10, 1 1));{null:1,1:2,1:3};{null:1,1:2,2:3};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1.1:2};{null:1,1.1:2};{null:1,1.100:2};{null:1,"char1":2};{null:1,"vchar1":2};{null:1,"str1":2};{null:1,2012-03-11:2};{null:1,2012-03-11:2};{1:null,2:1,3:1};{1:null,2:1,3:2};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1.1};{1:null,2:1.1};{1:null,2:1.100};{1:null,2:"char1"};{1:null,2:"vchar1"};{1:null,2:"str1"};{1:null,2:2012-03-11};{1:null,2:2012-03-11};[];{"id":1} +11;1;12;12;12;12;12;1.2;1.2;1.2;1.2;1.2;1.2;1.2;1.2;char13;char23;char33;varchar13;varchar23;varchar33;string3;2012-03-12;2012-03-12;2012-03-12 12:11:12;2012-03-12 12:11:12;2012-03-12 12:11:12;2012-03-12 12:11:12;[1];[1];[12];[12];[12];[12];[12];[1.2];[2012-03-12,2012-03-12];[2012-03-12 
12:11:12];[2012-03-12,2012-03-12];[2012-03-12 12:11:12,2012-03-12 12:11:12,2012-03-12 12:11:12,2012-03-12 12:11:12];[char13,char23,char33];[char13,char23,char33,varchar13,varchar23,varchar33];[char23,char33,varchar13,varchar23,varchar33,string3];[1.2,1.2];ST_Point(22.1, 22.1);ST_Point(22.1, 22.1);90.35620117;39.939093;89.4274406433;58.9020987219;0;12;100.321;LINESTRING (1 1, 2 2);POLYGON ((1 1, 9 4, 10 4, 4 1, 1 1));{null:1,1:2,1:3};{null:1,2:2,1:3};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2:2};{null:1,2.2:2};{null:1,2.2:2};{null:1,2.200:2};{null:1,"char2":2};{null:1,"vchar2":2};{null:1,"str2":2};{null:1,2012-03-12:2};{null:1,2012-03-12:2};{1:null,2:1,3:1};{1:null,2:2,3:1};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2};{1:null,2:2.2};{1:null,2:2.2};{1:null,2:2.200};{1:null,2:"char2"};{1:null,2:"vchar2"};{1:null,2:"str2"};{1:null,2:2012-03-12};{1:null,2:2012-03-12};[];{"id":1} +null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null;null +null;0;1;1;1;1;1;0.1;0.1;0.1;0.1;0.1;0.1;0.1;0.1;char11;char21;char31;varchar11;varchar21;varchar31;string1;2012-03-01;2012-03-01;2012-03-01 01:00:01;2012-03-01 01:00:01;2012-03-01 01:00:01;2012-03-01 01:00:01;[0];[0];[1];[1];[1];[1];[1];[0.1];[2012-03-01,2012-03-01];[2012-03-01 01:00:01];[2012-03-01,2012-03-01];[2012-03-01 01:00:01,2012-03-01 01:00:01,2012-03-01 01:00:01,2012-03-01 01:00:01];[char11,char21,char31];[char11,char21,char31,varchar11,varchar21,varchar31];[char21,char31,varchar11,varchar21,varchar31,string1];[0.1,0.1];ST_Point(12.1, 32.1);ST_Point(12.1, 32.1);5;5;116.4274406433;39.9020987219;0;1;4.321;LINESTRING (1 1, 2 2);POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0));{null:1,0:2,0:3};{null:1,1:2,2:3};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1:2};{null:1,1.1:2};{null:1,1.1:2};{null:1,1.100:2};{null:1,"char1":2};{null:1,"vchar1":2};{null:1,"str1":2};{null:1,2012-03-01:2};{null:1,2012-03-01:2};{1:null,2:0,3:0};{1:null,2:1,3:2};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1};{1:null,2:1.1};{1:null,2:1.1};{1:null,2:1.100};{1:null,2:"char1"};{1:null,2:"vchar1"};{1:null,2:"str1"};{1:null,2:2012-03-01};{1:null,2:2012-03-01};[];{"id":1} diff --git a/regression-test/data/nereids_syntax_p1/mv/aggregate/agg_sync_mv.out b/regression-test/data/nereids_syntax_p1/mv/aggregate/agg_sync_mv.out new file mode 100644 index 00000000000000..fee553ed3d091d --- /dev/null +++ b/regression-test/data/nereids_syntax_p1/mv/aggregate/agg_sync_mv.out @@ -0,0 +1,1516 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !select_any_value -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_any_value_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_approx_count_distinct -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_approx_count_distinct_mv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_collect_set -- +\N [1] +0 [1] +1 [2] +2 [3] +3 [4] +4 [5] +5 [6] +6 [7] +7 [8] +8 [9] +9 [10] +10 [11] +11 [12] + +-- !select_collect_set_mv -- +\N [1] +0 [1] +1 [2] +2 [3] +3 [4] +4 [5] +5 [6] +6 [7] +7 [8] +8 [9] +9 [10] +10 [11] +11 [12] + +-- !select_collect_list -- +\N [1, 1, 1, 1, 1] +0 [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +1 [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] +2 [3, 3, 3, 3, 3, 3, 3, 3, 3, 3] +3 [4, 4, 4, 4, 4, 4, 4, 4, 4, 4] +4 [5, 5, 5, 5, 5, 5, 5, 5, 5, 5] +5 [6, 6, 6, 6, 6, 6, 6, 6, 6, 6] +6 [7, 7, 7, 7, 7, 7, 7, 7, 7, 7] +7 [8, 8, 8, 8, 8, 8, 8, 8, 8, 8] +8 [9, 9, 9, 9, 9, 9, 9, 9, 9, 9] +9 [10, 10, 10, 10, 10, 10, 10, 10, 10, 10] +10 [11, 11, 11, 11, 11, 11, 11, 11, 11, 11] +11 [12, 12, 12, 12, 12, 12, 12, 12, 12, 12] + +-- !select_collect_list_mv -- +\N [1, 1, 1, 1, 1] +0 [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +1 [2, 2, 2, 2, 2, 2, 2, 2, 2, 2] +2 [3, 3, 3, 3, 3, 3, 3, 3, 3, 3] +3 [4, 4, 4, 4, 4, 4, 4, 4, 4, 4] +4 [5, 5, 5, 5, 5, 5, 5, 5, 5, 5] +5 [6, 6, 6, 6, 6, 6, 6, 6, 6, 6] +6 [7, 7, 7, 7, 7, 7, 7, 7, 7, 7] +7 [8, 8, 8, 8, 8, 8, 8, 8, 8, 8] +8 [9, 9, 9, 9, 9, 9, 9, 9, 9, 9] +9 [10, 10, 10, 10, 10, 10, 10, 10, 10, 10] +10 [11, 11, 11, 11, 11, 11, 11, 11, 11, 11] +11 [12, 12, 12, 12, 12, 12, 12, 12, 12, 12] + +-- !select_corr -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_corr_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_percentile_array -- +\N [1, 1, 1] +0 [1, 1, 1] +1 [2, 2, 2] +2 [3, 3, 3] +3 [4, 4, 4] +4 [5, 5, 5] +5 [6, 6, 6] +6 [7, 7, 7] +7 [8, 8, 8] +8 [9, 9, 9] +9 [10, 10, 10] +10 [11, 11, 11] +11 [12, 12, 12] + +-- !select_percentile_array_mv -- +\N [1, 1, 1] +0 [1, 1, 1] +1 [2, 2, 2] +2 [3, 3, 3] +3 [4, 4, 4] +4 [5, 5, 5] +5 [6, 6, 6] +6 [7, 7, 7] +7 [8, 8, 8] +8 [9, 9, 9] +9 [10, 10, 10] +10 [11, 11, 11] +11 [12, 12, 12] + +-- !select_quantile_union -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_quantile_union_mv -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_count_by_enum -- +\N [{"cbe":{"string1":5,"null":10},"notnull":15,"null":0,"all":15}] +0 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +1 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +2 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] +3 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +4 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +5 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] +6 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +7 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +8 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] +9 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +10 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +11 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] + +-- !select_count_by_enum_mv -- +\N [{"cbe":{"string1":5,"null":10},"notnull":15,"null":0,"all":15}] +0 
[{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +1 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +2 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] +3 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +4 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +5 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] +6 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +7 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +8 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] +9 [{"cbe":{"string1":10},"notnull":10,"null":0,"all":10}] +10 [{"cbe":{"string2":10},"notnull":10,"null":0,"all":10}] +11 [{"cbe":{"string3":10},"notnull":10,"null":0,"all":10}] + +-- !select_avg_weighted -- +\N 1.0 +0 1.0 +1 2.0 +2 3.0 +3 4.0 +4 5.0 +5 6.0 +6 6.999999999999999 +7 8.0 +8 9.0 +9 10.0 +10 11.000000000000002 +11 12.0 + +-- !select_avg_weighted_mv -- +\N 1.0 +0 1.0 +1 2.0 +2 3.0 +3 4.0 +4 5.0 +5 6.0 +6 6.999999999999999 +7 8.0 +8 9.0 +9 10.0 +10 11.000000000000002 +11 12.0 + +-- !select_bitmap_intersect -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_bitmap_intersect_mv -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_bitmap_agg -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_bitmap_agg_mv -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_bitmap_union -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_bitmap_union_mv -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_bitmap_union_count -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_bitmap_union_count_mv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_bitmap_union_int -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_bitmap_union_int_mv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_group_array_intersect -- +\N [] +0 [1] +1 [2] +2 [3] +3 [4] +4 [5] +5 [6] +6 [7] +7 [8] +8 [9] +9 [10] +10 [11] +11 [12] + +-- !select_group_array_intersect_mv -- +\N [] +0 [1] +1 [2] +2 [3] +3 [4] +4 [5] +5 [6] +6 [7] +7 [8] +8 [9] +9 [10] +10 [11] +11 [12] + +-- !select_group_bit_and -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_group_bit_and_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_group_bit_or -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_group_bit_or_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_group_bit_xor -- +\N 1 +0 0 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_group_bit_xor_mv -- +\N 1 +0 0 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_group_bitmap_xor -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_group_bitmap_xor_mv -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_hll_union_agg -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_hll_union_agg_mv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_hll_union -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- 
!select_hll_union_mv -- +\N \N +0 \N +1 \N +2 \N +3 \N +4 \N +5 \N +6 \N +7 \N +8 \N +9 \N +10 \N +11 \N + +-- !select_intersect_count -- +\N 0 +0 0 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_intersect_count_mv -- +\N 0 +0 0 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_group_concat -- +\N 1,1,1,1,1 +0 1,1,1,1,1,1,1,1,1,1 +1 2,2,2,2,2,2,2,2,2,2 +2 3,3,3,3,3,3,3,3,3,3 +3 4,4,4,4,4,4,4,4,4,4 +4 5,5,5,5,5,5,5,5,5,5 +5 6,6,6,6,6,6,6,6,6,6 +6 7,7,7,7,7,7,7,7,7,7 +7 8,8,8,8,8,8,8,8,8,8 +8 9,9,9,9,9,9,9,9,9,9 +9 10,10,10,10,10,10,10,10,10,10 +10 11,11,11,11,11,11,11,11,11,11 +11 12,12,12,12,12,12,12,12,12,12 + +-- !select_group_concat_mv -- +\N 1,1,1,1,1 +0 1,1,1,1,1,1,1,1,1,1 +1 2,2,2,2,2,2,2,2,2,2 +2 3,3,3,3,3,3,3,3,3,3 +3 4,4,4,4,4,4,4,4,4,4 +4 5,5,5,5,5,5,5,5,5,5 +5 6,6,6,6,6,6,6,6,6,6 +6 7,7,7,7,7,7,7,7,7,7 +7 8,8,8,8,8,8,8,8,8,8 +8 9,9,9,9,9,9,9,9,9,9 +9 10,10,10,10,10,10,10,10,10,10 +10 11,11,11,11,11,11,11,11,11,11 +11 12,12,12,12,12,12,12,12,12,12 + +-- !select_multi_distinct_group_concat -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_multi_distinct_group_concat_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_multi_distinct_sum0 -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_multi_distinct_sum0_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_multi_distinct_sum -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_multi_distinct_sum_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_histogram -- +\N {"num_buckets":1,"buckets":[{"lower":"1","upper":"1","ndv":1,"count":5,"pre_sum":0}]} +0 {"num_buckets":1,"buckets":[{"lower":"1","upper":"1","ndv":1,"count":10,"pre_sum":0}]} +1 {"num_buckets":1,"buckets":[{"lower":"2","upper":"2","ndv":1,"count":10,"pre_sum":0}]} +2 {"num_buckets":1,"buckets":[{"lower":"3","upper":"3","ndv":1,"count":10,"pre_sum":0}]} +3 {"num_buckets":1,"buckets":[{"lower":"4","upper":"4","ndv":1,"count":10,"pre_sum":0}]} +4 {"num_buckets":1,"buckets":[{"lower":"5","upper":"5","ndv":1,"count":10,"pre_sum":0}]} +5 {"num_buckets":1,"buckets":[{"lower":"6","upper":"6","ndv":1,"count":10,"pre_sum":0}]} +6 {"num_buckets":1,"buckets":[{"lower":"7","upper":"7","ndv":1,"count":10,"pre_sum":0}]} +7 {"num_buckets":1,"buckets":[{"lower":"8","upper":"8","ndv":1,"count":10,"pre_sum":0}]} +8 {"num_buckets":1,"buckets":[{"lower":"9","upper":"9","ndv":1,"count":10,"pre_sum":0}]} +9 {"num_buckets":1,"buckets":[{"lower":"10","upper":"10","ndv":1,"count":10,"pre_sum":0}]} +10 {"num_buckets":1,"buckets":[{"lower":"11","upper":"11","ndv":1,"count":10,"pre_sum":0}]} +11 {"num_buckets":1,"buckets":[{"lower":"12","upper":"12","ndv":1,"count":10,"pre_sum":0}]} + +-- !select_histogram_mv -- +\N {"num_buckets":1,"buckets":[{"lower":"1","upper":"1","ndv":1,"count":5,"pre_sum":0}]} +0 {"num_buckets":1,"buckets":[{"lower":"1","upper":"1","ndv":1,"count":10,"pre_sum":0}]} +1 {"num_buckets":1,"buckets":[{"lower":"2","upper":"2","ndv":1,"count":10,"pre_sum":0}]} +2 {"num_buckets":1,"buckets":[{"lower":"3","upper":"3","ndv":1,"count":10,"pre_sum":0}]} +3 {"num_buckets":1,"buckets":[{"lower":"4","upper":"4","ndv":1,"count":10,"pre_sum":0}]} +4 {"num_buckets":1,"buckets":[{"lower":"5","upper":"5","ndv":1,"count":10,"pre_sum":0}]} +5 {"num_buckets":1,"buckets":[{"lower":"6","upper":"6","ndv":1,"count":10,"pre_sum":0}]} 
+6 {"num_buckets":1,"buckets":[{"lower":"7","upper":"7","ndv":1,"count":10,"pre_sum":0}]} +7 {"num_buckets":1,"buckets":[{"lower":"8","upper":"8","ndv":1,"count":10,"pre_sum":0}]} +8 {"num_buckets":1,"buckets":[{"lower":"9","upper":"9","ndv":1,"count":10,"pre_sum":0}]} +9 {"num_buckets":1,"buckets":[{"lower":"10","upper":"10","ndv":1,"count":10,"pre_sum":0}]} +10 {"num_buckets":1,"buckets":[{"lower":"11","upper":"11","ndv":1,"count":10,"pre_sum":0}]} +11 {"num_buckets":1,"buckets":[{"lower":"12","upper":"12","ndv":1,"count":10,"pre_sum":0}]} + +-- !select_max_by -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_max_by_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_min_by -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_min_by_mv -- +\N 1 +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 +10 11 +11 12 + +-- !select_multi_distinct_count -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_multi_distinct_count_mv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_ndv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_ndv_mv -- +\N 1 +0 1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +10 1 +11 1 + +-- !select_covar -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_covar_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_covar_samp -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_covar_samp_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_percentile -- +\N 1.0 +0 1.0 +1 2.0 +2 3.0 +3 4.0 +4 5.0 +5 6.0 +6 7.0 +7 8.0 +8 9.0 +9 10.0 +10 11.0 +11 12.0 + +-- !select_percentile_mv -- +\N 1.0 +0 1.0 +1 2.0 +2 3.0 +3 4.0 +4 5.0 +5 6.0 +6 7.0 +7 8.0 +8 9.0 +9 10.0 +10 11.0 +11 12.0 + +-- !select_percentile_approx -- +\N 1.0 +0 1.0 +1 2.0 +2 3.0 +3 4.0 +4 5.0 +5 6.0 +6 7.0 +7 8.0 +8 9.0 +9 10.0 +10 11.0 +11 12.0 + +-- !select_percentile_approx_mv -- +\N 1.0 +0 1.0 +1 2.0 +2 3.0 +3 4.0 +4 5.0 +5 6.0 +6 7.0 +7 8.0 +8 9.0 +9 10.0 +10 11.0 +11 12.0 + +-- !select_sequence_count -- +\N 0 +0 0 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_sequence_count_mv -- +\N 0 +0 0 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_sequence_match -- +\N false +0 false +1 false +2 false +3 false +4 false +5 false +6 false +7 false +8 false +9 false +10 false +11 false + +-- !select_sequence_match_mv -- +\N false +0 false +1 false +2 false +3 false +4 false +5 false +6 false +7 false +8 false +9 false +10 false +11 false + +-- !select_stddev -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_stddev_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_stddev_pop -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_stddev_pop_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_stddev_samp -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_stddev_samp_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 
0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_sum0 -- +\N 5 +0 10 +1 20 +2 30 +3 40 +4 50 +5 60 +6 70 +7 80 +8 90 +9 100 +10 110 +11 120 + +-- !select_sum0_mv -- +\N 5 +0 10 +1 20 +2 30 +3 40 +4 50 +5 60 +6 70 +7 80 +8 90 +9 100 +10 110 +11 120 + +-- !select_topn -- +\N {"null":10,"varchar11":5} +0 {"varchar11":10} +1 {"varchar12":10} +2 {"varchar13":10} +3 {"varchar11":10} +4 {"varchar12":10} +5 {"varchar13":10} +6 {"varchar11":10} +7 {"varchar12":10} +8 {"varchar13":10} +9 {"varchar11":10} +10 {"varchar12":10} +11 {"varchar13":10} + +-- !select_topn_mv -- +\N {"null":10,"varchar11":5} +0 {"varchar11":10} +1 {"varchar12":10} +2 {"varchar13":10} +3 {"varchar11":10} +4 {"varchar12":10} +5 {"varchar13":10} +6 {"varchar11":10} +7 {"varchar12":10} +8 {"varchar13":10} +9 {"varchar11":10} +10 {"varchar12":10} +11 {"varchar13":10} + +-- !select_topn_array -- +\N ["null", "varchar11"] +0 ["varchar11"] +1 ["varchar12"] +2 ["varchar13"] +3 ["varchar11"] +4 ["varchar12"] +5 ["varchar13"] +6 ["varchar11"] +7 ["varchar12"] +8 ["varchar13"] +9 ["varchar11"] +10 ["varchar12"] +11 ["varchar13"] + +-- !select_topn_array_mv -- +\N ["null", "varchar11"] +0 ["varchar11"] +1 ["varchar12"] +2 ["varchar13"] +3 ["varchar11"] +4 ["varchar12"] +5 ["varchar13"] +6 ["varchar11"] +7 ["varchar12"] +8 ["varchar13"] +9 ["varchar11"] +10 ["varchar12"] +11 ["varchar13"] + +-- !select_topn_weighted -- +\N ["varchar11"] +0 ["varchar11"] +1 ["varchar12"] +2 ["varchar13"] +3 ["varchar11"] +4 ["varchar12"] +5 ["varchar13"] +6 ["varchar11"] +7 ["varchar12"] +8 ["varchar13"] +9 ["varchar11"] +10 ["varchar12"] +11 ["varchar13"] + +-- !select_topn_weighted_mv -- +\N ["varchar11"] +0 ["varchar11"] +1 ["varchar12"] +2 ["varchar13"] +3 ["varchar11"] +4 ["varchar12"] +5 ["varchar13"] +6 ["varchar11"] +7 ["varchar12"] +8 ["varchar13"] +9 ["varchar11"] +10 ["varchar12"] +11 ["varchar13"] + +-- !select_variance -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_variance_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_var_pop -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_var_pop_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_variance_samp -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_variance_samp_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_var_samp -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_var_samp_mv -- +\N 0.0 +0 0.0 +1 0.0 +2 0.0 +3 0.0 +4 0.0 +5 0.0 +6 0.0 +7 0.0 +8 0.0 +9 0.0 +10 0.0 +11 0.0 + +-- !select_window_funnel -- +\N 1 +0 1 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_window_funnel_mv -- +\N 1 +0 1 +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 0 +9 0 +10 0 +11 0 + +-- !select_retention -- +\N [0, 0] +0 [0, 0] +1 [0, 0] +2 [0, 0] +3 [0, 0] +4 [0, 0] +5 [0, 0] +6 [0, 0] +7 [0, 0] +8 [0, 0] +9 [0, 0] +10 [0, 0] +11 [0, 0] + +-- !select_retention_mv -- +\N [0, 0] +0 [0, 0] +1 [0, 0] +2 [0, 0] +3 [0, 0] +4 [0, 0] +5 [0, 0] +6 [0, 0] +7 [0, 0] +8 [0, 0] +9 [0, 0] +10 [0, 0] +11 [0, 0] + +-- !test -- +\N {} +1 {0:"string1"} +2 {1:"string2"} +3 {2:"string3"} +4 {3:"string1"} +5 {4:"string2"} +6 {5:"string3"} +7 {6:"string1"} +8 {7:"string2"} +9 {8:"string3"} +10 
{9:"string1"} +11 {10:"string2"} +12 {11:"string3"} + diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query1.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query1.out index fbd48ddbc0936c..e4a4bd5e427fa9 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query1.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject -----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF2 ctr_store_sk->[ctr_store_sk,s_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF1 s_store_sk->[ctr_store_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 ----------------------PhysicalProject ------------------------filter((store.s_state = 'TN')) ---------------------------PhysicalOlapScan[store] +--------------------------PhysicalOlapScan[store] apply RFs: RF2 ------------------hashAgg[GLOBAL] --------------------PhysicalDistribute[DistributionSpecHash] ----------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out index 828c9a910d6c7a..5593df9194937d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 1999) and (t_w_secyear.sale_type = 'w')) 
-------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 1999) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 1998) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1998) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out index 5fb881643448c0..813924b9b99e18 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out @@ -97,16 +97,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject --------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 ----------------------------------------------PhysicalProject 
------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +120,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +143,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query23.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query23.out index 480608162ed25e..5e2eafb60ffe8d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query23.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject 
------------------------------filter((date_dim.d_moy = 7) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 7) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query30.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query30.out index c8382b090048c6..8bea4f656a6132 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query30.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query30.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git 
a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query31.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query31.out index ed1de5614536fb..f17ab6cf863612 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query31.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 1999)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 1999)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 1999)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and 
(ss2.d_year = 1999)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 1999)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 1999)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query39.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query39.out index 6b69ec2c1355eb..7b00628d966265 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query39.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out index 1946f8020ded90..d99f0294700040 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 
customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query41.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query41.out index c27e19cc9f2387..2dd4aadeae29a7 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query41.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 744) and (i1.i_manufact_id >= 704)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 
0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query47.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query47.out index a7bc45eeacdd3f..0ea84e3ca2b33d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query47.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query57.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query57.out index 5c47b7ae249995..356837343861e9 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query57.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand 
= v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category,i_category];RF8 i_brand->[i_brand,i_brand];RF9 cc_name->[cc_name,cc_name];RF10 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query64.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query64.out index 6745146c3159d9..37ce378c5bd7cc 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query64.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query64.out @@ -7,94 +7,95 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) --------PhysicalDistribute[DistributionSpecHash] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_shipto_date_sk = d3.d_date_sk)) otherCondition=() build RFs:RF19 d_date_sk->[c_first_shipto_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[cr_item_sk,cs_item_sk,sr_item_sk,ss_item_sk] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store_sales.ss_customer_sk = customer.c_customer_sk)) otherCondition=(( not (cd_marital_status = cd_marital_status))) build RFs:RF18 c_customer_sk->[ss_customer_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd2.hd_income_band_sk = ib2.ib_income_band_sk)) otherCondition=() build RFs:RF18 ib_income_band_sk->[hd_income_band_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_promo_sk = promotion.p_promo_sk)) otherCondition=() build RFs:RF17 p_promo_sk->[ss_promo_sk] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd1.hd_income_band_sk = ib1.ib_income_band_sk)) otherCondition=() build RFs:RF17 ib_income_band_sk->[hd_income_band_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN 
bucketShuffle] hashCondition=((store_sales.ss_addr_sk = ad1.ca_address_sk)) otherCondition=() build RFs:RF16 ss_addr_sk->[ca_address_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_addr_sk = ad2.ca_address_sk)) otherCondition=() build RFs:RF16 ca_address_sk->[c_current_addr_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[customer_address] apply RFs: RF16 -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN colocated] hashCondition=((store_sales.ss_item_sk = store_returns.sr_item_sk) and (store_sales.ss_ticket_number = store_returns.sr_ticket_number)) otherCondition=() build RFs:RF14 ss_item_sk->[sr_item_sk];RF15 ss_ticket_number->[sr_ticket_number] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_returns] apply RFs: RF14 RF15 +------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_addr_sk = ad1.ca_address_sk)) otherCondition=() build RFs:RF15 ca_address_sk->[ss_addr_sk] --------------------------------PhysicalProject -----------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_cdemo_sk = cd1.cd_demo_sk)) otherCondition=() build RFs:RF13 cd_demo_sk->[ss_cdemo_sk] -------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[cr_item_sk,cs_item_sk,ss_item_sk] ---------------------------------------PhysicalProject -----------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_store_sk = store.s_store_sk)) otherCondition=() build RFs:RF11 s_store_sk->[ss_store_sk] -------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd1.hd_income_band_sk = ib1.ib_income_band_sk)) otherCondition=() build RFs:RF10 ib_income_band_sk->[hd_income_band_sk] -----------------------------------------------PhysicalProject -------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_hdemo_sk = hd1.hd_demo_sk)) otherCondition=() build RFs:RF9 hd_demo_sk->[ss_hdemo_sk] ---------------------------------------------------PhysicalProject -----------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cs_ui.cs_item_sk)) otherCondition=() build RFs:RF8 cs_item_sk->[ss_item_sk] -------------------------------------------------------PhysicalProject ---------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = d1.d_date_sk)) otherCondition=() build RFs:RF7 d_date_sk->[ss_sold_date_sk] -----------------------------------------------------------PhysicalProject -------------------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF7 RF8 RF9 RF11 RF12 RF13 RF17 RF18 -----------------------------------------------------------PhysicalProject -------------------------------------------------------------filter(d_year IN (1999, 2000)) ---------------------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------------------PhysicalProject ---------------------------------------------------------filter((sale > (2 * refund))) 
-----------------------------------------------------------hashAgg[GLOBAL] -------------------------------------------------------------PhysicalDistribute[DistributionSpecHash] ---------------------------------------------------------------hashAgg[LOCAL] +----------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_hdemo_sk = hd2.hd_demo_sk)) otherCondition=() build RFs:RF14 hd_demo_sk->[c_current_hdemo_sk] +------------------------------------PhysicalProject +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_hdemo_sk = hd1.hd_demo_sk)) otherCondition=() build RFs:RF13 hd_demo_sk->[ss_hdemo_sk] +----------------------------------------PhysicalProject +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_promo_sk = promotion.p_promo_sk)) otherCondition=() build RFs:RF12 p_promo_sk->[ss_promo_sk] +--------------------------------------------PhysicalProject +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_cdemo_sk = cd2.cd_demo_sk)) otherCondition=(( not (cd_marital_status = cd_marital_status))) build RFs:RF11 cd_demo_sk->[c_current_cdemo_sk] +------------------------------------------------PhysicalProject +--------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_cdemo_sk = cd1.cd_demo_sk)) otherCondition=() build RFs:RF10 cd_demo_sk->[ss_cdemo_sk] +----------------------------------------------------PhysicalProject +------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_shipto_date_sk = d3.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[c_first_shipto_date_sk] +--------------------------------------------------------PhysicalProject +----------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_sales_date_sk = d2.d_date_sk)) otherCondition=() build RFs:RF8 d_date_sk->[c_first_sales_date_sk] +------------------------------------------------------------PhysicalProject +--------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ss_customer_sk] ----------------------------------------------------------------PhysicalProject -------------------------------------------------------------------hashJoin[INNER_JOIN colocated] hashCondition=((catalog_sales.cs_item_sk = catalog_returns.cr_item_sk) and (catalog_sales.cs_order_number = catalog_returns.cr_order_number)) otherCondition=() build RFs:RF5 cr_item_sk->[cs_item_sk];RF6 cr_order_number->[cs_order_number] +------------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_store_sk = store.s_store_sk)) otherCondition=() build RFs:RF6 s_store_sk->[ss_store_sk] --------------------------------------------------------------------PhysicalProject -----------------------------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF5 RF6 RF12 +----------------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = d1.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ss_sold_date_sk] 
+------------------------------------------------------------------------PhysicalProject +--------------------------------------------------------------------------hashJoin[INNER_JOIN colocated] hashCondition=((store_sales.ss_item_sk = store_returns.sr_item_sk) and (store_sales.ss_ticket_number = store_returns.sr_ticket_number)) otherCondition=() build RFs:RF3 sr_item_sk->[cr_item_sk,cs_item_sk,ss_item_sk];RF4 sr_ticket_number->[ss_ticket_number] +----------------------------------------------------------------------------PhysicalProject +------------------------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cs_ui.cs_item_sk)) otherCondition=() build RFs:RF2 cs_item_sk->[ss_item_sk] +--------------------------------------------------------------------------------PhysicalProject +----------------------------------------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 RF5 RF6 RF7 RF10 RF12 RF13 RF15 RF19 +--------------------------------------------------------------------------------PhysicalProject +----------------------------------------------------------------------------------filter((sale > (2 * refund))) +------------------------------------------------------------------------------------hashAgg[GLOBAL] +--------------------------------------------------------------------------------------PhysicalDistribute[DistributionSpecHash] +----------------------------------------------------------------------------------------hashAgg[LOCAL] +------------------------------------------------------------------------------------------PhysicalProject +--------------------------------------------------------------------------------------------hashJoin[INNER_JOIN colocated] hashCondition=((catalog_sales.cs_item_sk = catalog_returns.cr_item_sk) and (catalog_sales.cs_order_number = catalog_returns.cr_order_number)) otherCondition=() build RFs:RF0 cr_item_sk->[cs_item_sk];RF1 cr_order_number->[cs_order_number] +----------------------------------------------------------------------------------------------PhysicalProject +------------------------------------------------------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF3 RF19 +----------------------------------------------------------------------------------------------PhysicalProject +------------------------------------------------------------------------------------------------PhysicalOlapScan[catalog_returns] apply RFs: RF3 RF19 +----------------------------------------------------------------------------PhysicalProject +------------------------------------------------------------------------------PhysicalOlapScan[store_returns] apply RFs: RF19 +------------------------------------------------------------------------PhysicalProject +--------------------------------------------------------------------------filter(d_year IN (1999, 2000)) +----------------------------------------------------------------------------PhysicalOlapScan[date_dim] --------------------------------------------------------------------PhysicalProject -----------------------------------------------------------------------PhysicalOlapScan[catalog_returns] apply RFs: RF12 ---------------------------------------------------PhysicalProject -----------------------------------------------------PhysicalOlapScan[household_demographics] apply RFs: RF10 -----------------------------------------------PhysicalProject 
-------------------------------------------------PhysicalOlapScan[income_band] -------------------------------------------PhysicalProject ---------------------------------------------PhysicalOlapScan[store] ---------------------------------------PhysicalProject -----------------------------------------filter((item.i_current_price <= 58.00) and (item.i_current_price >= 49.00) and i_color IN ('blush', 'lace', 'lawn', 'misty', 'orange', 'pink')) -------------------------------------------PhysicalOlapScan[item] -------------------------------------PhysicalProject ---------------------------------------PhysicalOlapScan[customer_demographics] -------------------------PhysicalProject ---------------------------PhysicalOlapScan[promotion] ---------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((customer.c_current_addr_sk = ad2.ca_address_sk)) otherCondition=() build RFs:RF4 c_current_addr_sk->[ca_address_sk] -------------------------PhysicalProject ---------------------------PhysicalOlapScan[customer_address] apply RFs: RF4 -------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_cdemo_sk = cd2.cd_demo_sk)) otherCondition=() build RFs:RF3 cd_demo_sk->[c_current_cdemo_sk] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_sales_date_sk = d2.d_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[c_first_sales_date_sk] ---------------------------------PhysicalProject -----------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_hdemo_sk = hd2.hd_demo_sk)) otherCondition=() build RFs:RF1 hd_demo_sk->[c_current_hdemo_sk] -------------------------------------PhysicalProject ---------------------------------------PhysicalOlapScan[customer] apply RFs: RF1 RF2 RF3 RF19 -------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd2.hd_income_band_sk = ib2.ib_income_band_sk)) otherCondition=() build RFs:RF0 ib_income_band_sk->[hd_income_band_sk] -----------------------------------------PhysicalProject -------------------------------------------PhysicalOlapScan[household_demographics] apply RFs: RF0 +----------------------------------------------------------------------PhysicalOlapScan[store] +----------------------------------------------------------------PhysicalProject +------------------------------------------------------------------PhysicalOlapScan[customer] apply RFs: RF8 RF9 RF11 RF14 RF16 +------------------------------------------------------------PhysicalProject +--------------------------------------------------------------PhysicalOlapScan[date_dim] +--------------------------------------------------------PhysicalProject +----------------------------------------------------------PhysicalOlapScan[date_dim] +----------------------------------------------------PhysicalProject +------------------------------------------------------PhysicalOlapScan[customer_demographics] +------------------------------------------------PhysicalProject +--------------------------------------------------PhysicalOlapScan[customer_demographics] +--------------------------------------------PhysicalProject +----------------------------------------------PhysicalOlapScan[promotion] ----------------------------------------PhysicalProject 
-------------------------------------------PhysicalOlapScan[income_band] +------------------------------------------PhysicalOlapScan[household_demographics] apply RFs: RF17 +------------------------------------PhysicalProject +--------------------------------------PhysicalOlapScan[household_demographics] apply RFs: RF18 --------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[date_dim] +----------------------------------PhysicalOlapScan[customer_address] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[customer_demographics] +------------------------------PhysicalOlapScan[customer_address] +------------------------PhysicalProject +--------------------------PhysicalOlapScan[income_band] +--------------------PhysicalProject +----------------------PhysicalOlapScan[income_band] ----------------PhysicalProject -------------------PhysicalOlapScan[date_dim] +------------------filter((item.i_current_price <= 58.00) and (item.i_current_price >= 49.00) and i_color IN ('blush', 'lace', 'lawn', 'misty', 'orange', 'pink')) +--------------------PhysicalOlapScan[item] --PhysicalResultSink ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 1999)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2000)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out index ebb6f5a717b8c0..8b171914ebd371 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL) > if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL) > if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] 
hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query75.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query75.out index 51355f21bc2517..c26b81b87791ba 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query75.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 2002)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 2001)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query81.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query81.out index 07e251e38f2a40..97ae3085a37ff7 100644 --- 
a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query81.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query81.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/constraints/query23.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/constraints/query23.out index c96446fbfcb1aa..85217462d1a5ed 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/constraints/query23.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/constraints/query23.out @@ -52,26 +52,26 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() +------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 item_sk->[cs_item_sk] --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) --------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject -------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) 
otherCondition=() +------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 item_sk->[ws_item_sk] --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF4 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF4 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 RF8 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out index 01b1d2752e59b5..a30f38dbe4c49a 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] 
--------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out index ed56e0191fcf33..d08a6aedb094e3 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out @@ -97,12 +97,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF14 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] @@ -120,12 +120,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() 
--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF17 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] @@ -143,12 +143,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF17 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF20 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query23.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query23.out index a8c20854755264..aa0bb3fa70d0e7 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query23.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] --------------------PhysicalProject ----------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() ------------------------PhysicalProject --------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF5 
----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) ------------------------PhysicalOlapScan[date_dim] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF8 d_date_sk->[ws_sold_date_sk] --------------------PhysicalProject -----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 ws_item_sk->[item_sk] -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF7 ws_item_sk->[item_sk] +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------------PhysicalProject --------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF8 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query31.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query31.out index 1d5a399c9451be..6c244739da9ca8 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query31.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county,ca_county,ca_county,ca_county] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) ---------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) 
otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF7 ca_county->[ca_county,ca_county,ca_county] +--------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF6 ca_county->[ca_county,ca_county] ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 RF7 RF8 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 RF7 RF8 --------------------------PhysicalProject ----------------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------PhysicalProject ------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) ---------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF7 RF8 --------------------PhysicalProject ----------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) --------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query39.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query39.out index 9fd32d5844c2c6..d906073878075f 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query39.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply 
RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out index 63f30b978f4d68..19e9098ee4555e 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id,customer_id,customer_id,customer_id] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] 
hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF7 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF7 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query41.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query41.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query47.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query47.out index c807b2d355e616..e2d0ecdb0ba6ac 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query47.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query47.out @@ -35,9 +35,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------PhysicalProject --------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 
i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query57.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query57.out index 0de9d7afec0114..10cbd1fa9a0489 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query57.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category];RF8 i_brand->[i_brand];RF9 cc_name->[cc_name];RF10 rn->[(rn - 1)] ----------------PhysicalProject -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query64.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query64.out index 9101d1e89b4514..2cff316c5a22aa 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query64.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query64.out @@ -92,10 +92,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN 
shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out index ac1a7051ce017c..d8a82ca998ac09 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) 
---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query75.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query75.out index dded6a5bb0e7d7..7a6c63c2385f24 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query75.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query75.out @@ -70,9 +70,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF6 i_brand_id->[i_brand_id];RF7 i_class_id->[i_class_id];RF8 i_category_id->[i_category_id];RF9 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 RF8 RF9 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query1.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query1.out index 9da3e4ef561a0b..c9404710bfa16f 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query1.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF3 ctr_store_sk->[ctr_store_sk,s_store_sk] --------------PhysicalProject ----------------hashJoin[INNER_JOIN broadcast] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF2 s_store_sk->[ctr_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF1 c_customer_sk->[ctr_customer_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 RF3 
----------------------PhysicalProject ------------------------PhysicalOlapScan[customer] ------------------PhysicalProject --------------------filter((store.s_state = 'SD')) -----------------------PhysicalOlapScan[store] +----------------------PhysicalOlapScan[store] apply RFs: RF3 --------------hashAgg[GLOBAL] ----------------PhysicalDistribute[DistributionSpecHash] ------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out index b44acb09519210..eaf1cde7b0a304 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out index bb695462d53376..04bf133065b269 
100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out @@ -97,13 +97,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF12 ss_item_sk->[ss_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -120,13 +120,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[cs_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[cs_item_sk,ss_item_sk] 
+--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF15 ss_item_sk->[cs_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF16 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -143,13 +143,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[ss_item_sk,ws_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[ss_item_sk,ws_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF18 ss_item_sk->[ws_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF16 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF19 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query23.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query23.out index 873b34450814bd..26e1f5089288a1 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query23.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 item_sk->[cs_item_sk] ------------------------PhysicalProject ---------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +--------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 RF5 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) ------------------------PhysicalOlapScan[date_dim] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF8 d_date_sk->[ws_sold_date_sk] --------------------PhysicalProject -----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 ws_item_sk->[item_sk] -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF7 ws_item_sk->[item_sk] +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------------PhysicalProject ---------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +--------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF6 c_customer_sk->[ws_bill_customer_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF8 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query30.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query30.out index 429f135a959efd..748165ced2fb2a 100644 --- 
a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query30.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query30.out @@ -22,12 +22,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF3 ca_address_sk->[c_current_addr_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ctr_customer_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 RF4 ----------------------PhysicalProject ------------------------PhysicalOlapScan[customer] apply RFs: RF3 ------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query31.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query31.out index fb1cee757a39db..2a86f699341a7b 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query31.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county,ca_county,ca_county,ca_county] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) ---------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF7 ca_county->[ca_county,ca_county,ca_county] 
+--------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF6 ca_county->[ca_county,ca_county] ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 RF7 RF8 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 RF7 RF8 --------------------------PhysicalProject ----------------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------PhysicalProject ------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) ---------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF7 RF8 --------------------PhysicalProject ----------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) --------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query39.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query39.out index 714a012c9f2cc5..90c507f9c536f5 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query39.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out 
b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out index 2ab0ebfe7fb09c..546119842b58bd 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id,customer_id,customer_id,customer_id] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject 
------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF7 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF7 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query41.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query41.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query47.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query47.out index 659f33cec98833..b27001e19a607d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query47.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name 
= v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query57.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query57.out index 17896b5f964a2d..018e37ee77fbfa 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query57.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category];RF8 i_brand->[i_brand];RF9 cc_name->[cc_name];RF10 rn->[(rn - 1)] ----------------PhysicalProject -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff 
--git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query64.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query64.out index edaca21609264f..d1d49f78ba895b 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query64.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query64.out @@ -92,10 +92,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out index 58e91a6a35dcf3..64a56e4e850db7 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) 
+------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query75.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query75.out index dded6a5bb0e7d7..7a6c63c2385f24 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query75.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query75.out @@ -70,9 +70,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF6 i_brand_id->[i_brand_id];RF7 i_class_id->[i_class_id];RF8 i_category_id->[i_category_id];RF9 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 RF8 RF9 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query81.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query81.out index 520a159f3c1413..c06a48a1f5f922 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query81.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query81.out @@ -22,12 +22,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] 
--------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF3 ca_address_sk->[c_current_addr_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ctr_customer_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 RF4 ----------------------PhysicalProject ------------------------PhysicalOlapScan[customer] apply RFs: RF3 ------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query1.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query1.out index ee435a29842897..9ee4e3a3195cb8 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query1.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query1.out @@ -18,9 +18,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject ----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) ------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out index 9da76c59cd36a9..b699aa67e934a0 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) 
otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out index 90ab0efba365c4..9d7cfc860ad6a4 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out @@ -123,9 +123,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() ----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] @@ -146,9 +146,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() ----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query23.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query23.out index 3ac1b889aec542..1343c4f4fb0214 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query23.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] 
hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query31.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query31.out index 2b5d8d54715328..eb49a9cb10b936 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query31.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] 
hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query39.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query39.out index 6c438c8fe0bc3a..899b1a5e0bdd99 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query39.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out index 61bce070b34aaa..43138dc7c62651 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = 
t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 ----------------------------PhysicalProject 
------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query41.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query41.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query47.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query47.out index e9c29da52d61a9..52bd4fa6fbf623 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query47.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query47.out @@ -35,9 +35,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------PhysicalProject --------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query57.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query57.out index ae1d9e8b5afbfe..60276268fc3224 100644 --- 
a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query57.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query57.out @@ -35,9 +35,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------PhysicalProject --------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query64.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query64.out index aa03eafaa473d9..b70b5ccdca32df 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query64.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query64.out @@ -7,94 +7,95 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) --------PhysicalDistribute[DistributionSpecHash] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_shipto_date_sk = d3.d_date_sk)) otherCondition=() +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[cr_item_sk,cs_item_sk,sr_item_sk,ss_item_sk] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_sales_date_sk = d2.d_date_sk)) otherCondition=() +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd2.hd_income_band_sk = ib2.ib_income_band_sk)) otherCondition=() --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store_sales.ss_customer_sk = customer.c_customer_sk)) otherCondition=(( not (cd_marital_status = cd_marital_status))) build RFs:RF17 ss_customer_sk->[c_customer_sk] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd1.hd_income_band_sk = ib1.ib_income_band_sk)) otherCondition=() ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((customer.c_current_addr_sk = ad2.ca_address_sk)) otherCondition=() build RFs:RF16 c_current_addr_sk->[ca_address_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_addr_sk = ad2.ca_address_sk)) otherCondition=() ----------------------------PhysicalProject 
-------------------------------PhysicalOlapScan[customer_address] apply RFs: RF16 -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((customer.c_current_cdemo_sk = cd2.cd_demo_sk)) otherCondition=() +------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_addr_sk = ad1.ca_address_sk)) otherCondition=() --------------------------------PhysicalProject ----------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_hdemo_sk = hd2.hd_demo_sk)) otherCondition=() ------------------------------------PhysicalProject ---------------------------------------PhysicalOlapScan[customer] apply RFs: RF17 -------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd2.hd_income_band_sk = ib2.ib_income_band_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_hdemo_sk = hd1.hd_demo_sk)) otherCondition=() ----------------------------------------PhysicalProject -------------------------------------------PhysicalOlapScan[household_demographics] -----------------------------------------PhysicalProject -------------------------------------------PhysicalOlapScan[income_band] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[customer_demographics] -------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = store_returns.sr_item_sk) and (store_sales.ss_ticket_number = store_returns.sr_ticket_number)) otherCondition=() build RFs:RF11 ss_item_sk->[sr_item_sk];RF12 ss_ticket_number->[sr_ticket_number] -----------------------------PhysicalProject -------------------------------PhysicalOlapScan[store_returns] apply RFs: RF11 RF12 -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_addr_sk = ad1.ca_address_sk)) otherCondition=() build RFs:RF10 ss_addr_sk->[ca_address_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[customer_address] apply RFs: RF10 ---------------------------------PhysicalProject -----------------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store_sales.ss_cdemo_sk = cd1.cd_demo_sk)) otherCondition=() build RFs:RF9 ss_cdemo_sk->[cd_demo_sk] -------------------------------------PhysicalProject ---------------------------------------PhysicalOlapScan[customer_demographics] apply RFs: RF9 -------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF8 i_item_sk->[cr_item_sk,cs_item_sk,ss_item_sk] ---------------------------------------PhysicalProject -----------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_promo_sk = promotion.p_promo_sk)) otherCondition=() -------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_store_sk = store.s_store_sk)) otherCondition=() -----------------------------------------------PhysicalProject -------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((hd1.hd_income_band_sk = ib1.ib_income_band_sk)) otherCondition=() 
---------------------------------------------------PhysicalProject -----------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_hdemo_sk = hd1.hd_demo_sk)) otherCondition=() -------------------------------------------------------PhysicalProject ---------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cs_ui.cs_item_sk)) otherCondition=() build RFs:RF3 cs_item_sk->[ss_item_sk] -----------------------------------------------------------PhysicalProject -------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = d1.d_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------------------------------------PhysicalProject -----------------------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF8 ---------------------------------------------------------------PhysicalProject -----------------------------------------------------------------filter(d_year IN (2001, 2002)) -------------------------------------------------------------------PhysicalOlapScan[date_dim] -----------------------------------------------------------PhysicalProject -------------------------------------------------------------filter((sale > (2 * refund))) ---------------------------------------------------------------hashAgg[GLOBAL] -----------------------------------------------------------------PhysicalDistribute[DistributionSpecHash] -------------------------------------------------------------------hashAgg[LOCAL] +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_promo_sk = promotion.p_promo_sk)) otherCondition=() +--------------------------------------------PhysicalProject +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_current_cdemo_sk = cd2.cd_demo_sk)) otherCondition=(( not (cd_marital_status = cd_marital_status))) +------------------------------------------------PhysicalProject +--------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_cdemo_sk = cd1.cd_demo_sk)) otherCondition=() +----------------------------------------------------PhysicalProject +------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_shipto_date_sk = d3.d_date_sk)) otherCondition=() +--------------------------------------------------------PhysicalProject +----------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer.c_first_sales_date_sk = d2.d_date_sk)) otherCondition=() +------------------------------------------------------------PhysicalProject +--------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_customer_sk = customer.c_customer_sk)) otherCondition=() +----------------------------------------------------------------PhysicalProject +------------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_store_sk = store.s_store_sk)) otherCondition=() --------------------------------------------------------------------PhysicalProject -----------------------------------------------------------------------hashJoin[INNER_JOIN colocated] 
hashCondition=((catalog_sales.cs_item_sk = catalog_returns.cr_item_sk) and (catalog_sales.cs_order_number = catalog_returns.cr_order_number)) otherCondition=() +----------------------------------------------------------------------hashJoin[INNER_JOIN colocated] hashCondition=((store_sales.ss_item_sk = store_returns.sr_item_sk) and (store_sales.ss_ticket_number = store_returns.sr_ticket_number)) otherCondition=() build RFs:RF4 sr_item_sk->[cr_item_sk,cs_item_sk] ------------------------------------------------------------------------PhysicalProject ---------------------------------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF8 +--------------------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cs_ui.cs_item_sk)) otherCondition=() build RFs:RF3 cs_item_sk->[ss_item_sk] +----------------------------------------------------------------------------PhysicalProject +------------------------------------------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = d1.d_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] +--------------------------------------------------------------------------------PhysicalProject +----------------------------------------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF19 +--------------------------------------------------------------------------------PhysicalProject +----------------------------------------------------------------------------------filter(d_year IN (2001, 2002)) +------------------------------------------------------------------------------------PhysicalOlapScan[date_dim] +----------------------------------------------------------------------------PhysicalProject +------------------------------------------------------------------------------filter((sale > (2 * refund))) +--------------------------------------------------------------------------------hashAgg[GLOBAL] +----------------------------------------------------------------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------------------------------------------------------------hashAgg[LOCAL] +--------------------------------------------------------------------------------------PhysicalProject +----------------------------------------------------------------------------------------hashJoin[INNER_JOIN colocated] hashCondition=((catalog_sales.cs_item_sk = catalog_returns.cr_item_sk) and (catalog_sales.cs_order_number = catalog_returns.cr_order_number)) otherCondition=() +------------------------------------------------------------------------------------------PhysicalProject +--------------------------------------------------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF19 +------------------------------------------------------------------------------------------PhysicalProject +--------------------------------------------------------------------------------------------PhysicalOlapScan[catalog_returns] apply RFs: RF4 RF19 ------------------------------------------------------------------------PhysicalProject ---------------------------------------------------------------------------PhysicalOlapScan[catalog_returns] apply RFs: RF8 -------------------------------------------------------PhysicalProject 
---------------------------------------------------------PhysicalOlapScan[household_demographics] ---------------------------------------------------PhysicalProject -----------------------------------------------------PhysicalOlapScan[income_band] -----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store] -------------------------------------------PhysicalProject ---------------------------------------------PhysicalOlapScan[promotion] ---------------------------------------PhysicalProject -----------------------------------------filter((item.i_current_price <= 33.00) and (item.i_current_price >= 24.00) and i_color IN ('blanched', 'brown', 'burlywood', 'chocolate', 'drab', 'medium')) -------------------------------------------PhysicalOlapScan[item] +--------------------------------------------------------------------------PhysicalOlapScan[store_returns] apply RFs: RF19 +--------------------------------------------------------------------PhysicalProject +----------------------------------------------------------------------PhysicalOlapScan[store] +----------------------------------------------------------------PhysicalProject +------------------------------------------------------------------PhysicalOlapScan[customer] +------------------------------------------------------------PhysicalProject +--------------------------------------------------------------PhysicalOlapScan[date_dim] +--------------------------------------------------------PhysicalProject +----------------------------------------------------------PhysicalOlapScan[date_dim] +----------------------------------------------------PhysicalProject +------------------------------------------------------PhysicalOlapScan[customer_demographics] +------------------------------------------------PhysicalProject +--------------------------------------------------PhysicalOlapScan[customer_demographics] +--------------------------------------------PhysicalProject +----------------------------------------------PhysicalOlapScan[promotion] +----------------------------------------PhysicalProject +------------------------------------------PhysicalOlapScan[household_demographics] +------------------------------------PhysicalProject +--------------------------------------PhysicalOlapScan[household_demographics] +--------------------------------PhysicalProject +----------------------------------PhysicalOlapScan[customer_address] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[customer_address] +------------------------PhysicalProject +--------------------------PhysicalOlapScan[income_band] --------------------PhysicalProject -----------------------PhysicalOlapScan[date_dim] +----------------------PhysicalOlapScan[income_band] ----------------PhysicalProject -------------------PhysicalOlapScan[date_dim] +------------------filter((item.i_current_price <= 33.00) and (item.i_current_price >= 24.00) and i_color IN ('blanched', 'brown', 'burlywood', 'chocolate', 'drab', 'medium')) +--------------------PhysicalOlapScan[item] --PhysicalResultSink ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] 
hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out index d58dde1f36cc23..f300a896a4d563 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query75.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query75.out index 
70c4bbd1244c84..921d754e533285 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query75.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query1.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query1.out index ee435a29842897..4569832b6d5aa6 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query1.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject -----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF2 ctr_store_sk->[ctr_store_sk,s_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF1 s_store_sk->[ctr_store_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 ----------------------PhysicalProject ------------------------filter((store.s_state = 'SD')) ---------------------------PhysicalOlapScan[store] 
+--------------------------PhysicalOlapScan[store] apply RFs: RF2 ------------------hashAgg[GLOBAL] --------------------PhysicalDistribute[DistributionSpecHash] ----------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out index 73db27a320dcf0..bcfe7ba3d74e79 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out index 2e183bfdc176d4..8a1467be7a4a58 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out @@ -97,16 +97,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) 
--------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject --------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +120,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] 
----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +143,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query23.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query23.out index 7ae34bb3eb2975..91ab82284bc473 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query23.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query23.out @@ -52,27 +52,27 @@ 
PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query30.out 
b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query30.out index 9c11294b4ec73f..b60e62f5e7bade 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query30.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query30.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query31.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query31.out index acec5a66b5dc11..c768d674841b33 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query31.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] 
hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query39.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query39.out index e1f48b088a80d5..b7ca740e55c672 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query39.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out index 1946f8020ded90..d99f0294700040 100644 --- 
a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) 
otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query41.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query41.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query47.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query47.out index bba74448d30ce6..f33181c085733c 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query47.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = 
v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query57.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query57.out index 06e9f277d5a2a3..15dda452ebc984 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query57.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category,i_category];RF8 i_brand->[i_brand,i_brand];RF9 cc_name->[cc_name,cc_name];RF10 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query64.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query64.out index 5db220a63dbd6d..68698c4fbb3894 100644 --- 
a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query64.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query64.out @@ -91,10 +91,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out index 415df0902a0dad..0f159a647c03de 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) 
-------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query75.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query75.out index 70c4bbd1244c84..921d754e533285 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query75.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query81.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query81.out index 5bf233e6df1eae..27fc8430ff3e83 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query81.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query81.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = 
customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/hint_tpcds/shape/query1.out b/regression-test/data/new_shapes_p0/hint_tpcds/shape/query1.out index 6135d8dff62b45..401b9bd4b037c9 100644 --- a/regression-test/data/new_shapes_p0/hint_tpcds/shape/query1.out +++ b/regression-test/data/new_shapes_p0/hint_tpcds/shape/query1.out @@ -18,16 +18,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF3 ctr_store_sk->[ctr_store_sk] --------------hashAgg[GLOBAL] ----------------PhysicalDistribute[DistributionSpecHash] ------------------hashAgg[LOCAL] --------------------PhysicalDistribute[DistributionSpecExecutionAny] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 --------------PhysicalProject -----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 ctr_customer_sk->[c_customer_sk] ------------------PhysicalProject ---------------------PhysicalOlapScan[customer] +--------------------PhysicalOlapScan[customer] apply RFs: RF2 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF1 s_store_sk->[ctr_store_sk] ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 diff --git a/regression-test/data/new_shapes_p0/hint_tpcds/shape/query64.out b/regression-test/data/new_shapes_p0/hint_tpcds/shape/query64.out index b33dcb2a77ae65..26a67aa0d6e85a 100644 --- a/regression-test/data/new_shapes_p0/hint_tpcds/shape/query64.out +++ b/regression-test/data/new_shapes_p0/hint_tpcds/shape/query64.out @@ -91,10 +91,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 1999)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2000)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf100/constraints/query23.out b/regression-test/data/new_shapes_p0/tpcds_sf100/constraints/query23.out index c96446fbfcb1aa..85217462d1a5ed 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/constraints/query23.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/constraints/query23.out @@ -52,26 +52,26 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() +------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 item_sk->[cs_item_sk] --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) --------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject -------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() +------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 item_sk->[ws_item_sk] --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF4 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF4 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 RF8 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out index 01b1d2752e59b5..a30f38dbe4c49a 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out index c13ef52c492bf4..1c17f5a78a9ca3 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out @@ -97,12 +97,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject 
---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF14 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] @@ -120,13 +120,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() --------------------------------------------PhysicalProject ----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF17 ------------------------------------------------PhysicalProject --------------------------------------------------PhysicalOlapScan[item] --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) @@ -144,13 +144,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] 
hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() --------------------------------------------PhysicalProject ----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF17 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF20 ------------------------------------------------PhysicalProject --------------------------------------------------PhysicalOlapScan[item] --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query23.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query23.out index a8c20854755264..aa0bb3fa70d0e7 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query23.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] --------------------PhysicalProject ----------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() ------------------------PhysicalProject --------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF5 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) ------------------------PhysicalOlapScan[date_dim] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF8 d_date_sk->[ws_sold_date_sk] --------------------PhysicalProject -----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 ws_item_sk->[item_sk] -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF7 ws_item_sk->[item_sk] +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------------PhysicalProject --------------------------hashJoin[LEFT_SEMI_JOIN shuffle] 
hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF8 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query31.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query31.out index 1d5a399c9451be..6c244739da9ca8 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query31.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county,ca_county,ca_county,ca_county] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) ---------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF7 ca_county->[ca_county,ca_county,ca_county] +--------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF6 ca_county->[ca_county,ca_county] ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 RF7 RF8 
----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 RF7 RF8 --------------------------PhysicalProject ----------------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------PhysicalProject ------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) ---------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF7 RF8 --------------------PhysicalProject ----------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) --------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query39.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query39.out index 9fd32d5844c2c6..d906073878075f 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query39.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out index 63f30b978f4d68..19e9098ee4555e 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject 
----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id,customer_id,customer_id,customer_id] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF7 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF7 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) 
diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query41.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query41.out
index 34081b60b900b2..d20341c931a06d 100644
--- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query41.out
+++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query41.out
@@ -13,7 +13,7 @@ PhysicalResultSink
 --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748))
 ----------------------PhysicalOlapScan[item] apply RFs: RF0
 ------------------PhysicalProject
---------------------filter((item_cnt > 0))
+--------------------filter((ifnull(item_cnt, 0) > 0))
 ----------------------hashAgg[GLOBAL]
 ------------------------PhysicalDistribute[DistributionSpecHash]
 --------------------------hashAgg[LOCAL]
diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query47.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query47.out
index c807b2d355e616..e2d0ecdb0ba6ac 100644
--- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query47.out
+++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query47.out
@@ -35,9 +35,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 )
 ------------PhysicalProject
 --------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=()
 ----------------PhysicalProject
-------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=()
+------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)]
 --------------------PhysicalProject
-----------------------PhysicalCteConsumer ( cteId=CTEId#0 )
+----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7
 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001))
 ----------------------PhysicalCteConsumer ( cteId=CTEId#0 )
 ----------------PhysicalProject
diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query57.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query57.out
index 0de9d7afec0114..10cbd1fa9a0489 100644
--- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query57.out
+++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query57.out
@@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 )
 --------PhysicalDistribute[DistributionSpecGather]
 ----------PhysicalTopN[LOCAL_SORT]
 ------------PhysicalProject
---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)))
otherCondition=() +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category];RF8 i_brand->[i_brand];RF9 cc_name->[cc_name];RF10 rn->[(rn - 1)] ----------------PhysicalProject -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query64.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query64.out index be12380c8a40f0..ac5d0d6d739e29 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query64.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query64.out @@ -90,10 +90,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out index ac1a7051ce017c..d8a82ca998ac09 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] 
hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query75.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query75.out index dded6a5bb0e7d7..7a6c63c2385f24 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query75.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query75.out @@ -70,9 +70,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 
2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF6 i_brand_id->[i_brand_id];RF7 i_class_id->[i_class_id];RF8 i_category_id->[i_category_id];RF9 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 RF8 RF9 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query1.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query1.out index 9da3e4ef561a0b..c9404710bfa16f 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query1.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF3 ctr_store_sk->[ctr_store_sk,s_store_sk] --------------PhysicalProject ----------------hashJoin[INNER_JOIN broadcast] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF2 s_store_sk->[ctr_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF1 c_customer_sk->[ctr_customer_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 RF3 ----------------------PhysicalProject ------------------------PhysicalOlapScan[customer] ------------------PhysicalProject --------------------filter((store.s_state = 'SD')) -----------------------PhysicalOlapScan[store] +----------------------PhysicalOlapScan[store] apply RFs: RF3 --------------hashAgg[GLOBAL] ----------------PhysicalDistribute[DistributionSpecHash] ------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out index b44acb09519210..eaf1cde7b0a304 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / 
year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out index 4f62dac28a161a..e4f277daf67cac 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out @@ -97,13 +97,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] 
+--------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF12 ss_item_sk->[ss_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -120,15 +120,15 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk,i_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[cs_item_sk] +----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[item] +--------------------------------------------------PhysicalOlapScan[item] apply RFs: RF16 --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) @@ -144,15 +144,15 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 
d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[i_item_sk,ws_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[ws_item_sk] +----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF18 i_item_sk->[ws_item_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[item] +--------------------------------------------------PhysicalOlapScan[item] apply RFs: RF19 --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query23.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query23.out index 873b34450814bd..26e1f5089288a1 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query23.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 item_sk->[cs_item_sk] ------------------------PhysicalProject ---------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +--------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk] ----------------------------PhysicalProject 
-------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 RF5 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) ------------------------PhysicalOlapScan[date_dim] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF8 d_date_sk->[ws_sold_date_sk] --------------------PhysicalProject -----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 ws_item_sk->[item_sk] -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +----------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF7 ws_item_sk->[item_sk] +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------------PhysicalProject ---------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +--------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF6 c_customer_sk->[ws_bill_customer_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF8 ----------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) --------------------PhysicalProject ----------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query30.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query30.out index 6ace51502d61c6..6671347af5cc6c 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query30.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query30.out @@ -22,12 +22,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF3 ca_address_sk->[c_current_addr_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = 
customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ctr_customer_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 RF4 ----------------------PhysicalProject ------------------------PhysicalOlapScan[customer] apply RFs: RF3 ------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query31.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query31.out index fb1cee757a39db..2a86f699341a7b 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query31.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county,ca_county,ca_county,ca_county] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) ---------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF7 ca_county->[ca_county,ca_county,ca_county] +--------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF6 ca_county->[ca_county,ca_county] ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 RF7 RF8 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 
2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 RF7 RF8 --------------------------PhysicalProject ----------------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------PhysicalProject ------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) ---------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF7 RF8 --------------------PhysicalProject ----------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) --------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query39.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query39.out index 714a012c9f2cc5..90c507f9c536f5 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query39.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out index 2ab0ebfe7fb09c..546119842b58bd 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer 
( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id,customer_id,customer_id,customer_id] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF7 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF7 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query41.out 
b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query41.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query47.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query47.out index 659f33cec98833..b27001e19a607d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query47.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query57.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query57.out index 17896b5f964a2d..018e37ee77fbfa 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query57.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category];RF8 i_brand->[i_brand];RF9 cc_name->[cc_name];RF10 rn->[(rn - 1)] ----------------PhysicalProject -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query64.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query64.out index 01b9dac6daf5da..b7cf8115b6db75 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query64.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query64.out @@ -90,10 +90,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 
--------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out index 58e91a6a35dcf3..64a56e4e850db7 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query75.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query75.out index dded6a5bb0e7d7..7a6c63c2385f24 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query75.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query75.out @@ -70,9 +70,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] 
----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF6 i_brand_id->[i_brand_id];RF7 i_class_id->[i_class_id];RF8 i_category_id->[i_category_id];RF9 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 RF7 RF8 RF9 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query81.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query81.out index 0f21cb9d59718a..8006799a3cc5fd 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query81.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query81.out @@ -22,12 +22,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF3 ca_address_sk->[c_current_addr_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ctr_customer_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF2 RF4 ----------------------PhysicalProject ------------------------PhysicalOlapScan[customer] apply RFs: RF3 ------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query1.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query1.out index ee435a29842897..9ee4e3a3195cb8 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query1.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query1.out @@ -18,9 +18,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = 
customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject ----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) ------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out index 9da76c59cd36a9..b699aa67e934a0 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff 
--git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out index da5880c398c4c2..361e56fa135dab 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out @@ -124,9 +124,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] @@ -148,9 +148,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query23.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query23.out index 3ac1b889aec542..1343c4f4fb0214 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query23.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( 
cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query31.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query31.out index 2b5d8d54715328..eb49a9cb10b936 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query31.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] 
----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 2000)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply 
RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query39.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query39.out index 6c438c8fe0bc3a..899b1a5e0bdd99 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query39.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out index 61bce070b34aaa..43138dc7c62651 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id 
= t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query41.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query41.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query47.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query47.out index e9c29da52d61a9..52bd4fa6fbf623 100644 --- 
a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query47.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query47.out @@ -35,9 +35,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------PhysicalProject --------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query57.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query57.out index ae1d9e8b5afbfe..60276268fc3224 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query57.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query57.out @@ -35,9 +35,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------PhysicalProject --------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------PhysicalProject diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query64.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query64.out index d60b114a56b88d..6aa36d179b1db1 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query64.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query64.out @@ -91,10 +91,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out index d58dde1f36cc23..f300a896a4d563 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 
) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query75.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query75.out index 70c4bbd1244c84..921d754e533285 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query75.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query1.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query1.out index ee435a29842897..4569832b6d5aa6 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query1.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject -----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) 
otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF2 ctr_store_sk->[ctr_store_sk,s_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF1 s_store_sk->[ctr_store_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 ----------------------PhysicalProject ------------------------filter((store.s_state = 'SD')) ---------------------------PhysicalOlapScan[store] +--------------------------PhysicalOlapScan[store] apply RFs: RF2 ------------------hashAgg[GLOBAL] --------------------PhysicalDistribute[DistributionSpecHash] ----------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out index 73db27a320dcf0..bcfe7ba3d74e79 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2002) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 2002) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 2001) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 
------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 2001) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out index 91bdc0166d4571..d54d10fc56004f 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out @@ -97,16 +97,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject --------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,18 +120,18 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[cs_item_sk,i_item_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk] 
+------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[cs_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] --------------------------------------------PhysicalProject -----------------------------------------------PhysicalOlapScan[item] +----------------------------------------------PhysicalOlapScan[item] apply RFs: RF17 ----------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------PhysicalProject ------------------------------PhysicalAssertNumRows @@ -144,18 +144,18 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF20 ss_item_sk->[i_item_sk,ws_item_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ws_item_sk] +------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[ws_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) 
----------------------------------------------------PhysicalOlapScan[date_dim] --------------------------------------------PhysicalProject -----------------------------------------------PhysicalOlapScan[item] +----------------------------------------------PhysicalOlapScan[item] apply RFs: RF20 ----------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------PhysicalProject ------------------------------PhysicalAssertNumRows diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query23.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query23.out index 7ae34bb3eb2975..91ab82284bc473 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query23.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject 
---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 5) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query30.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query30.out index 2bb1de60cdd175..1fd2b5a1688c12 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query30.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query30.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query31.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query31.out index acec5a66b5dc11..c768d674841b33 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query31.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 
2000)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 2000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 2000)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 2000)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query39.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query39.out index e1f48b088a80d5..b7ca740e55c672 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query39.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = 
inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out index 1946f8020ded90..d99f0294700040 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) 
+--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query41.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query41.out index 34081b60b900b2..d20341c931a06d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query41.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 788) and (i1.i_manufact_id >= 748)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query47.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query47.out index bba74448d30ce6..f33181c085733c 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query47.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build 
RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query57.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query57.out index 06e9f277d5a2a3..15dda452ebc984 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query57.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category,i_category];RF8 i_brand->[i_brand,i_brand];RF9 cc_name->[cc_name,cc_name];RF10 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 --------------------filter((if((avg_monthly_sales > 
0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 1999)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query64.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query64.out index cba6aec962c579..2ef8d6701ee4c5 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query64.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query64.out @@ -91,10 +91,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 2001)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 --------------PhysicalProject ----------------filter((cs2.syear = 2002)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out index 415df0902a0dad..0f159a647c03de 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.0), (year_total / year_total), NULL) > if((year_total > 0.0), (year_total / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] 
+------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.0)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.0)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query75.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query75.out index 70c4bbd1244c84..921d754e533285 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query75.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 1999)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 1998)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query81.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query81.out index 4904c44cee3293..e6aef6266d392b 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query81.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query81.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > 
cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query1.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query1.out index fbd48ddbc0936c..e4a4bd5e427fa9 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query1.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query1.out @@ -18,17 +18,17 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 ctr_customer_sk->[c_customer_sk] --------------PhysicalProject -----------------PhysicalOlapScan[customer] +----------------PhysicalOlapScan[customer] apply RFs: RF3 --------------PhysicalProject -----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_store_sk = ctr2.ctr_store_sk)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF2 ctr_store_sk->[ctr_store_sk,s_store_sk] ------------------PhysicalProject --------------------hashJoin[INNER_JOIN shuffle] hashCondition=((store.s_store_sk = ctr1.ctr_store_sk)) otherCondition=() build RFs:RF1 s_store_sk->[ctr_store_sk] -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF1 RF2 ----------------------PhysicalProject ------------------------filter((store.s_state = 'TN')) ---------------------------PhysicalOlapScan[store] +--------------------------PhysicalOlapScan[store] apply RFs: RF2 ------------------hashAgg[GLOBAL] --------------------PhysicalDistribute[DistributionSpecHash] ----------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out index 828c9a910d6c7a..5593df9194937d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out @@ -35,19 +35,19 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), 
(cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000) > if((year_total > 0.00), (cast(year_total as DECIMALV3(38, 8)) / year_total), 0.000000))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject ----------------filter((t_w_secyear.dyear = 1999) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.dyear = 1999) and (t_s_secyear.sale_type = 's')) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.dyear = 1998) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1998) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out index 5fb881643448c0..813924b9b99e18 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out @@ -97,16 +97,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 
ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject --------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +120,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF14 RF15 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +143,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 
) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query23.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query23.out index 480608162ed25e..5e2eafb60ffe8d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query23.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query23.out @@ -52,27 +52,27 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------------hashAgg[LOCAL] --------------PhysicalUnion ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF5 cs_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_bill_customer_sk = 
best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF4 c_customer_sk->[cs_bill_customer_sk] ------------------------PhysicalProject --------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 +------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF3 RF4 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 7) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalCteConsumer ( cteId=CTEId#2 ) ----------------PhysicalProject -------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF6 ws_item_sk->[item_sk] ---------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 +------------------hashJoin[RIGHT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = frequent_ss_items.item_sk)) otherCondition=() build RFs:RF8 ws_item_sk->[item_sk] +--------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------------PhysicalProject -----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() +----------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_bill_customer_sk = best_ss_customer.c_customer_sk)) otherCondition=() build RFs:RF7 c_customer_sk->[ws_bill_customer_sk] ------------------------PhysicalProject ---------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[ws_sold_date_sk] +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF6 d_date_sk->[ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF5 +------------------------------PhysicalOlapScan[web_sales] apply RFs: RF6 RF7 ----------------------------PhysicalProject ------------------------------filter((date_dim.d_moy = 7) and (date_dim.d_year = 2000)) --------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query30.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query30.out index 0c415fc496c573..c894fcceff19a5 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query30.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query30.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN 
shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query31.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query31.out index ed1de5614536fb..f17ab6cf863612 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query31.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query31.out @@ -37,28 +37,28 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalQuickSort[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) +--------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ws1.ca_county = ws3.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF8 ca_county->[ca_county] ----------------PhysicalProject ------------------filter((ws3.d_qoy = 3) and (ws3.d_year = 1999)) ---------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +--------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF8 ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((ss2.ca_county = ss3.ca_county)) otherCondition=() build RFs:RF7 ca_county->[ca_county] --------------------PhysicalProject ----------------------filter((ss3.d_qoy = 3) and (ss3.d_year = 1999)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() +----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ws1.ca_county = ws2.ca_county)) otherCondition=((if((web_sales > 0.00), (cast(web_sales as DECIMALV3(38, 8)) / web_sales), NULL) > if((store_sales > 0.00), (cast(store_sales as DECIMALV3(38, 8)) / store_sales), NULL))) build RFs:RF6 ca_county->[ca_county,ca_county,ca_county] +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((ss1.ca_county = ws1.ca_county)) otherCondition=() build RFs:RF5 
ca_county->[ca_county,ca_county] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((ss1.ca_county = ss2.ca_county)) otherCondition=() build RFs:RF4 ca_county->[ca_county] ----------------------------PhysicalProject ------------------------------filter((ss1.d_qoy = 1) and (ss1.d_year = 1999)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 RF6 ----------------------------PhysicalProject ------------------------------filter((ss2.d_qoy = 2) and (ss2.d_year = 1999)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 RF6 --------------------------PhysicalProject ----------------------------filter((ws1.d_qoy = 1) and (ws1.d_year = 1999)) -------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF6 ------------------------PhysicalProject --------------------------filter((ws2.d_qoy = 2) and (ws2.d_year = 1999)) ----------------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query39.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query39.out index 6b69ec2c1355eb..7b00628d966265 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query39.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query39.out @@ -25,9 +25,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----PhysicalQuickSort[MERGE_SORT] ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] -----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() +----------hashJoin[INNER_JOIN shuffle] hashCondition=((inv1.i_item_sk = inv2.i_item_sk) and (inv1.w_warehouse_sk = inv2.w_warehouse_sk)) otherCondition=() build RFs:RF3 i_item_sk->[i_item_sk];RF4 w_warehouse_sk->[w_warehouse_sk] ------------filter((inv1.d_moy = 1)) ---------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------filter((inv2.d_moy = 2)) --------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out index 1946f8020ded90..d99f0294700040 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out @@ -46,29 +46,29 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF8 customer_id->[customer_id] 
--------------PhysicalProject ----------------filter((t_w_secyear.dyear = 2000) and (t_w_secyear.sale_type = 'w')) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 --------------PhysicalProject -----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() +----------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF7 customer_id->[customer_id] ------------------PhysicalProject --------------------filter((t_w_firstyear.dyear = 1999) and (t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year_total > 0.000000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) +--------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_c_secyear.customer_id)) otherCondition=((if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL) > if((year_total > 0.000000), (cast(year_total as DECIMALV3(38, 16)) / year_total), NULL))) build RFs:RF6 customer_id->[customer_id] ----------------------PhysicalProject ------------------------filter((t_c_secyear.dyear = 2000) and (t_c_secyear.sale_type = 'c')) ---------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF6 ----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() ---------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_c_firstyear.customer_id)) otherCondition=() build RFs:RF5 customer_id->[customer_id,customer_id] +--------------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id] ----------------------------PhysicalProject ------------------------------filter((t_s_secyear.dyear = 2000) and (t_s_secyear.sale_type = 's')) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 RF5 ----------------------------PhysicalProject ------------------------------filter((t_s_firstyear.dyear = 1999) and (t_s_firstyear.sale_type = 's') and (t_s_firstyear.year_total > 0.000000)) ---------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +--------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 --------------------------PhysicalProject ----------------------------filter((t_c_firstyear.dyear = 1999) and (t_c_firstyear.sale_type = 'c') and (t_c_firstyear.year_total > 0.000000)) ------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query41.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query41.out index c27e19cc9f2387..2dd4aadeae29a7 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query41.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query41.out @@ -13,7 +13,7 @@ PhysicalResultSink --------------------filter((i1.i_manufact_id <= 744) and (i1.i_manufact_id >= 704)) ----------------------PhysicalOlapScan[item] apply RFs: RF0 ------------------PhysicalProject ---------------------filter((item_cnt > 0)) +--------------------filter((ifnull(item_cnt, 0) > 0)) ----------------------hashAgg[GLOBAL] ------------------------PhysicalDistribute[DistributionSpecHash] --------------------------hashAgg[LOCAL] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query47.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query47.out index a7bc45eeacdd3f..0ea84e3ca2b33d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query47.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query47.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1)) and (v1.s_company_name = v1_lead.s_company_name) and (v1.s_store_name = v1_lead.s_store_name)) otherCondition=() build RFs:RF8 i_category->[i_category,i_category];RF9 i_brand->[i_brand,i_brand];RF10 s_store_name->[s_store_name,s_store_name];RF11 s_company_name->[s_company_name,s_company_name];RF12 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1)) and (v1.s_company_name = v1_lag.s_company_name) and (v1.s_store_name = v1_lag.s_store_name)) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 s_store_name->[s_store_name];RF6 s_company_name->[s_company_name];RF7 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 RF11 RF12 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2000)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF8 RF9 RF10 RF11 RF12 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query57.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query57.out index 5c47b7ae249995..356837343861e9 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query57.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query57.out @@ -33,13 +33,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------PhysicalTopN[LOCAL_SORT] ------------PhysicalProject ---------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() +--------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((v1.cc_name = v1_lead.cc_name) and (v1.i_brand = v1_lead.i_brand) and (v1.i_category = v1_lead.i_category) and (v1.rn = expr_(rn - 1))) otherCondition=() build RFs:RF7 i_category->[i_category,i_category];RF8 i_brand->[i_brand,i_brand];RF9 cc_name->[cc_name,cc_name];RF10 expr_(rn - 1)->[(rn + 1),rn] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((v1.cc_name = v1_lag.cc_name) and (v1.i_brand = v1_lag.i_brand) and (v1.i_category = v1_lag.i_category) and (v1.rn = expr_(rn + 1))) otherCondition=() build RFs:RF3 i_category->[i_category];RF4 i_brand->[i_brand];RF5 cc_name->[cc_name];RF6 rn->[(rn + 1)] --------------------PhysicalProject -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 RF5 RF6 RF7 RF8 RF9 RF10 --------------------filter((if((avg_monthly_sales > 0.0000), (cast(abs((sum_sales - cast(avg_monthly_sales as DECIMALV3(38, 2)))) as DECIMALV3(38, 10)) / avg_monthly_sales), NULL) > 0.100000) and (v2.avg_monthly_sales > 0.0000) and (v2.d_year = 2001)) -----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF7 RF8 RF9 RF10 ----------------PhysicalProject ------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query64.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query64.out index 4114b30062a48a..8155e898243e42 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query64.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query64.out @@ -91,10 +91,10 @@ PhysicalCteAnchor ( cteId=CTEId#1 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalQuickSort[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((cs1.item_sk = cs2.item_sk) and (cs1.store_name = cs2.store_name) and (cs1.store_zip = cs2.store_zip)) otherCondition=((cs2.cnt <= cs1.cnt)) build RFs:RF20 item_sk->[item_sk];RF21 store_name->[store_name];RF22 store_zip->[store_zip] --------------PhysicalProject ----------------filter((cs1.syear = 1999)) -------------------PhysicalCteConsumer ( cteId=CTEId#1 ) +------------------PhysicalCteConsumer ( cteId=CTEId#1 ) apply RFs: RF20 RF21 RF22 
--------------PhysicalProject ----------------filter((cs2.syear = 2000)) ------------------PhysicalCteConsumer ( cteId=CTEId#1 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out index ebb6f5a717b8c0..8b171914ebd371 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out @@ -35,20 +35,20 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL) > if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL))) +------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((t_s_firstyear.customer_id = t_w_secyear.customer_id)) otherCondition=((if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL) > if((year_total > 0.00), (cast(year_total as DECIMALV3(13, 8)) / year_total), NULL))) build RFs:RF5 customer_id->[customer_id] --------------PhysicalProject -----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() +----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF5 +--------------PhysicalProject +----------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((t_s_firstyear.customer_id = t_w_firstyear.customer_id)) otherCondition=() build RFs:RF4 customer_id->[customer_id,customer_id] +------------------hashJoin[INNER_JOIN shuffle] hashCondition=((t_s_secyear.customer_id = t_s_firstyear.customer_id)) otherCondition=() build RFs:RF3 customer_id->[customer_id] --------------------PhysicalProject ----------------------filter((t_s_secyear.sale_type = 's') and (t_s_secyear.year = 2000)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 --------------------PhysicalProject ----------------------filter((t_s_firstyear.sale_type = 's') and (t_s_firstyear.year = 1999) and (t_s_firstyear.year_total > 0.00)) -------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) +------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF4 ------------------PhysicalProject --------------------filter((t_w_firstyear.sale_type = 'w') and (t_w_firstyear.year = 1999) and (t_w_firstyear.year_total > 0.00)) ----------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ---------------PhysicalProject -----------------filter((t_w_secyear.sale_type = 'w') and (t_w_secyear.year = 2000)) -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query75.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query75.out index 51355f21bc2517..c26b81b87791ba 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query75.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query75.out @@ -65,9 +65,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) 
------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) +------------hashJoin[INNER_JOIN shuffle] hashCondition=((curr_yr.i_brand_id = prev_yr.i_brand_id) and (curr_yr.i_category_id = prev_yr.i_category_id) and (curr_yr.i_class_id = prev_yr.i_class_id) and (curr_yr.i_manufact_id = prev_yr.i_manufact_id)) otherCondition=(((cast(cast(sales_cnt as DECIMALV3(17, 2)) as DECIMALV3(23, 8)) / cast(sales_cnt as DECIMALV3(17, 2))) < 0.900000)) build RFs:RF12 i_brand_id->[i_brand_id];RF13 i_class_id->[i_class_id];RF14 i_category_id->[i_category_id];RF15 i_manufact_id->[i_manufact_id] --------------filter((curr_yr.d_year = 2002)) -----------------PhysicalCteConsumer ( cteId=CTEId#0 ) +----------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 RF13 RF14 RF15 --------------filter((prev_yr.d_year = 2001)) ----------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query81.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query81.out index da6fc8d51741b8..b3b6627dd0716d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query81.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query81.out @@ -22,10 +22,10 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ------PhysicalDistribute[DistributionSpecGather] --------PhysicalTopN[LOCAL_SORT] ----------PhysicalProject -------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) +------------hashJoin[INNER_JOIN broadcast] hashCondition=((ctr1.ctr_state = ctr2.ctr_state)) otherCondition=((cast(ctr_total_return as DOUBLE) > cast((avg(cast(ctr_total_return as DECIMALV3(38, 4))) * 1.2) as DOUBLE))) build RFs:RF4 ctr_state->[ctr_state] --------------PhysicalProject ----------------hashJoin[INNER_JOIN shuffle] hashCondition=((ctr1.ctr_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[ctr_customer_sk] -------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 +------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF3 RF4 ------------------PhysicalProject --------------------hashJoin[INNER_JOIN broadcast] hashCondition=((customer_address.ca_address_sk = customer.c_current_addr_sk)) otherCondition=() build RFs:RF2 ca_address_sk->[c_current_addr_sk] ----------------------PhysicalProject diff --git a/regression-test/data/query_p0/aggregate/array_agg.out b/regression-test/data/query_p0/aggregate/array_agg.out index 5f019f755e0e95..1fe44df3a144bc 100644 --- a/regression-test/data/query_p0/aggregate/array_agg.out +++ b/regression-test/data/query_p0/aggregate/array_agg.out @@ -95,3 +95,159 @@ 3 3 3 3 +-- !sql_array_agg_array -- +1 [["plum", "banana", "apple"], ["grape", "banana", null, "plum", "cherry"], ["apple", "banana", "kiwi", null], ["apple", "banana", "cherry", "kiwi", null], ["cherry", null], null] +10 [null, ["apple", "banana", null, "cherry", "grape"], ["cherry", "berry", null], ["peach"]] +11 [["grape", "apple", "kiwi"], 
null, null, null] +12 [["melon", "papaya", "kiwi"], ["plum", null, "kiwi", "banana"], ["plum", null, "mango"], ["plum", null]] +13 [["apple", null], null, ["peach", "cherry", "papaya", "kiwi", null], ["plum", null]] +14 [["orange", "mango", "plum"], ["apple", "melon"], ["orange", "grape", null], ["orange", "banana", null]] +15 [null, ["banana", "peach", "plum", null], null, ["strawberry", null]] +16 [["peach", "kiwi", null, "berry"], null, ["plum", "grape", null], ["kiwi", null]] +17 [["banana", "plum", null], ["papaya"], null, ["apple", "kiwi", null, "papaya"]] +18 [["apple", null], null, ["apple", "mango", null], null] +19 [["kiwi", "mango", null], ["pear", "grape"], ["cherry", null, "plum"], ["banana", "mango", "cherry"]] +2 [null, ["apple", null, "banana"], ["orange", "grape", null], null] +20 [["grape", null], ["kiwi", null], ["kiwi", "plum", "orange", null], ["papaya", "orange", "kiwi", null]] +21 [["kiwi", null]] +22 [["orange", "peach", null, "kiwi"]] +23 [["berry", "grape", null]] +24 [null] +25 [["mango", "plum", "apple", null]] +26 [["banana", null]] +27 [["orange", "kiwi", "plum", null]] +28 [null] +29 [["apple", null, "grape", "peach"]] +3 [["mango", null], ["orange"], ["apple", "kiwi", "papaya"], ["plum", "peach", null]] +30 [["kiwi", "banana", null]] +31 [["cherry", "berry", null, "plum"]] +32 [null] +33 [["apple", null, "kiwi", "orange"]] +34 [["grape", "plum", null]] +35 [["banana", null]] +36 [["kiwi", "orange", "plum", null]] +37 [null] +38 [["apple", null]] +39 [["plum", "grape", null]] +4 [["mango", null, "orange", "plum", "berry", "kiwi"], ["orange", "grape", "mango", "berry"], ["plum", "kiwi", null, "peach", "berry"], null] +40 [["banana", "kiwi", "peach", null]] +41 [["grape", null, "plum"]] +42 [["orange", "kiwi", "peach", null]] +43 [null] +44 [["apple", "banana", null]] +45 [["grape", null]] +46 [["plum", "kiwi", null, "orange"]] +47 [null] +48 [["mango", null]] +49 [["kiwi", "plum", "banana", null]] +5 [["peach", "melon", null], ["apple", null, "kiwi"], ["grape", "kiwi", null], null] +50 [null] +6 [null, null, ["cherry", "apple", null, "plum"], null] +7 [["papaya", "cherry", "apple", null], ["melon"], ["melon", null, "papaya", "grape", "kiwi", "berry", null], ["orange", "grape", "kiwi"]] +8 [["plum", "peach", null, "orange"], ["banana", null], ["berry", "cherry"], ["banana", "mango", null]] +9 [["orange", "kiwi", "berry", null, "plum"], ["apple", "kiwi", "plum", null, "mango"], ["kiwi", null], null] + +-- !sql_array_agg_map -- +1 [{"key5":null}, {"key2":15, "key3":8}, {"key1":10, "key2":5}, {"key1":10, "key2":20}, {"key2":null}, null] +10 [{"key3":5, "key4":null}, {"key1":null, "key6":9}, {"key2":10, "key7":null}, {"key1":10}] +11 [{"key1":9}, {"key4":5, "key5":null}, {"key1":3, "key5":null}, {"key4":null}] +12 [null, {"key4":25}, {"key2":20, "key3":null}, {"key1":null, "key2":5}] +13 [{"key2":null, "key3":7}, null, null, {"key3":null, "key5":10}] +14 [{"key6":5}, {"key5":15, "key6":25}, {"key1":3, "key6":8}, {"key4":3, "key5":null}] +15 [{"key1":18, "key6":22}, {"key2":4}, {"key3":null}, null] +16 [{"key2":20}, {"key2":2}, {"key2":8, "key3":null}, {"key7":7, "key3":null}] +17 [{"key4":8}, {"key6":9, "key7":null}, {"key1":10, "key4":14}, {"key7":null}] +18 [{"key1":11}, {"key1":10, "key2":null}, {"key2":2}, {"key2":null, "key5":10}] +19 [{"key7":9}, {"key1":1, "key2":2, "key3":3}, {"key1":null, "key7":6}, {"key3":7, "key4":null}] +2 [{"key1":null, "key5":25}, {"key1":10, "key2":null, "key3":20}, {"key2":null, "key3":7}, {"key3":null}] +20 [{"key1":null, 
"key3":6}, {"key1":1, "key9":6}, {"key1":14}, {"key5":3, "key7":null}] +21 [{"key1":10, "key6":2}] +22 [{"key3":null}] +23 [{"key1":8}] +24 [{"key2":15, "key4":null}] +25 [{"key7":18}] +26 [{"key3":12}] +27 [{"key5":10}] +28 [{"key1":14}] +29 [{"key2":4, "key4":null}] +3 [{"key1":12}, {"key1":5}, {"key3":null}, {"key1":5, "key4":null}] +30 [{"key6":6}] +31 [{"key3":null}] +32 [{"key2":9, "key7":null}] +33 [{"key1":7}] +34 [{"key4":20}] +35 [{"key1":12, "key5":null}] +36 [{"key3":11}] +37 [{"key1":null}] +38 [{"key2":3, "key6":9}] +39 [{"key5":8}] +4 [{"key2":30}, null, {"key4":15}, {"key3":7, "key4":null}] +40 [{"key1":15}] +41 [{"key3":7}] +42 [{"key4":5}] +43 [{"key1":2, "key7":null}] +44 [{"key2":14}] +45 [{"key4":12}] +46 [{"key6":10}] +47 [{"key2":null}] +48 [{"key5":9}] +49 [{"key1":13}] +5 [{"key1":10}, {"key1":7, "key2":8}, null, {"key2":8, "key5":null}] +50 [{"key7":8}] +6 [{"key4":7, "key6":null}, {"key1":1, "key2":2, "key3":null, "key4":4}, {"key3":null, "key6":12}, {"key2":null, "key3":25}] +7 [{"key1":12, "key3":6}, null, {"key4":15, "key5":null}, {"key1":5}] +8 [{"key1":6, "key7":12}, {"key2":9}, {"key1":null, "key5":50}, null] +9 [{"key2":null, "key5":40}, null, {"key2":14, "key5":7}, {"key1":10, "key2":20, "key3":30, "key4":40, "key5":50, "key6":60, "key7":null}] + +-- !sql_array_agg_struct -- +1 [{"id":1}, {"id":1}, {"id":1}, {"id":1}, {"id":1}, null] +10 [{"id":10}, {"id":10}, {"id":10}, {"id":null}] +11 [{"id":11}, {"id":11}, {"id":11}, {"id":null}] +12 [{"id":12}, {"id":12}, {"id":12}, {"id":null}] +13 [{"id":13}, {"id":13}, {"id":13}, {"id":null}] +14 [{"id":14}, {"id":null}, {"id":14}, {"id":null}] +15 [{"id":15}, {"id":null}, {"id":15}, {"id":null}] +16 [{"id":16}, {"id":16}, {"id":16}, {"id":16}] +17 [{"id":17}, {"id":17}, {"id":17}, {"id":17}] +18 [{"id":18}, {"id":null}, {"id":18}, {"id":18}] +19 [{"id":19}, {"id":null}, {"id":19}, {"id":19}] +2 [{"id":2}, {"id":null}, {"id":2}, {"id":2}] +20 [{"id":20}, {"id":20}, {"id":null}, {"id":null}] +21 [{"id":21}] +22 [{"id":22}] +23 [{"id":23}] +24 [{"id":24}] +25 [{"id":25}] +26 [{"id":26}] +27 [{"id":27}] +28 [{"id":28}] +29 [{"id":29}] +3 [{"id":3}, {"id":3}, {"id":3}, {"id":3}] +30 [{"id":30}] +31 [{"id":31}] +32 [{"id":32}] +33 [{"id":33}] +34 [{"id":34}] +35 [{"id":35}] +36 [{"id":36}] +37 [{"id":37}] +38 [{"id":38}] +39 [{"id":39}] +4 [{"id":null}, {"id":4}, {"id":4}, {"id":4}] +40 [{"id":40}] +41 [{"id":41}] +42 [{"id":42}] +43 [{"id":43}] +44 [{"id":44}] +45 [{"id":45}] +46 [{"id":46}] +47 [{"id":47}] +48 [{"id":48}] +49 [{"id":49}] +5 [{"id":5}, {"id":null}, {"id":5}, {"id":5}] +50 [{"id":50}] +6 [{"id":6}, {"id":6}, {"id":6}, {"id":6}] +7 [{"id":null}, {"id":null}, {"id":null}, {"id":7}] +8 [{"id":8}, {"id":8}, {"id":8}, {"id":8}] +9 [{"id":9}, {"id":9}, {"id":9}, {"id":9}] + diff --git a/regression-test/data/query_p0/aggregate/test_array_agg_complex.csv b/regression-test/data/query_p0/aggregate/test_array_agg_complex.csv new file mode 100644 index 00000000000000..920e2d80280fa6 --- /dev/null +++ b/regression-test/data/query_p0/aggregate/test_array_agg_complex.csv @@ -0,0 +1,112 @@ +1 \N \N \N +1 ["cherry", null] {"key2":null} {"id": 1} +1 ["apple", "banana", "cherry", "kiwi", null] {"key1":10, "key2":20} {"id": 1} +1 ["apple", "banana", "kiwi", null] {"key1":10, "key2":5} {"id": 1} +1 ["grape", "banana", null, "plum", "cherry"] {"key2":15, "key3":8} {"id": 1} +1 ["plum", "banana", "apple"] {"key5":null} {"id": 1} +2 \N {"key3":null} {"id": 2} +2 ["orange", "grape", null] {"key2":null, "key3":7} {"id": 2} +2 
["apple", null, "banana"] {"key1":10, "key2":null, "key3":20} {"id": null} +2 \N {"key1":null, "key5":25} {"id": 2} +3 ["plum", "peach", null] {"key1":5, "key4":null} {"id": 3} +3 ["apple", "kiwi", "papaya"] {"key3":null} {"id": 3} +3 ["orange"] {"key1":5} {"id": 3} +3 ["mango", null] {"key1":12} {"id": 3} +4 \N {"key3":7, "key4":null} {"id": 4} +4 ["plum", "kiwi", null, "peach", "berry"] {"key4":15} {"id": 4} +4 ["orange", "grape", "mango", "berry"] \N {"id": 4} +4 ["mango", null, "orange", "plum", "berry", "kiwi"] {"key2":30} {"id": null} +5 \N {"key2":8, "key5":null} {"id": 5} +5 ["grape", "kiwi", null] \N {"id": 5} +5 ["apple", null, "kiwi"] {"key1":7, "key2":8} {"id": null} +5 ["peach", "melon", null] {"key1":10} {"id": 5} +6 \N {"key2":null, "key3":25} {"id": 6} +6 ["cherry", "apple", null, "plum"] {"key3":null, "key6":12} {"id": 6} +6 \N {"key1":1, "key2":2, "key3":null, "key4":4} {"id": 6} +6 \N {"key4":7, "key6":null} {"id": 6} +7 ["orange", "grape", "kiwi"] {"key1":5} {"id": 7} +7 ["melon", null, "papaya", "grape", "kiwi", "berry", null] {"key4":15, "key5":null} {"id": null} +7 ["melon"] \N {"id": null} +7 ["papaya", "cherry", "apple", null] {"key1":12, "key3":6} {"id": null} +8 ["banana", "mango", null] \N {"id": 8} +8 ["berry", "cherry"] {"key1":null, "key5":50} {"id": 8} +8 ["banana", null] {"key2":9} {"id": 8} +8 ["plum", "peach", null, "orange"] {"key1":6, "key7":12} {"id": 8} +9 \N {"key1":10, "key2":20, "key3":30, "key4":40, "key5":50, "key6":60, "key7":null} {"id": 9} +9 ["kiwi", null] {"key2":14, "key5":7} {"id": 9} +9 ["apple", "kiwi", "plum", null, "mango"] \N {"id": 9} +9 ["orange", "kiwi", "berry", null, "plum"] {"key2":null, "key5":40} {"id": 9} +10 ["peach"] {"key1":10} {"id": null} +10 ["cherry", "berry", null] {"key2":10, "key7":null} {"id": 10} +10 ["apple", "banana", null, "cherry", "grape"] {"key1":null, "key6":9} {"id": 10} +10 \N {"key3":5, "key4":null} {"id": 10} +11 \N {"key4":null} {"id": null} +11 \N {"key1":3, "key5":null} {"id": 11} +11 \N {"key4":5, "key5":null} {"id": 11} +11 ["grape", "apple", "kiwi"] {"key1":9} {"id": 11} +12 ["plum", null] {"key1":null, "key2":5} {"id": null} +12 ["plum", null, "mango"] {"key2":20, "key3":null} {"id": 12} +12 ["plum", null, "kiwi", "banana"] {"key4":25} {"id": 12} +12 ["melon", "papaya", "kiwi"] \N {"id": 12} +13 ["plum", null] {"key3":null, "key5":10} {"id": null} +13 ["peach", "cherry", "papaya", "kiwi", null] \N {"id": 13} +13 \N \N {"id": 13} +13 ["apple", null] {"key2":null, "key3":7} {"id": 13} +14 ["orange", "banana", null] {"key4":3, "key5":null} {"id": null} +14 ["orange", "grape", null] {"key1":3, "key6":8} {"id": 14} +14 ["apple", "melon"] {"key5":15, "key6":25} {"id": null} +14 ["orange", "mango", "plum"] {"key6":5} {"id": 14} +15 ["strawberry", null] \N {"id": null} +15 \N {"key3":null} {"id": 15} +15 ["banana", "peach", "plum", null] {"key2":4} {"id": null} +15 \N {"key1":18, "key6":22} {"id": 15} +16 ["kiwi", null] {"key7":7, "key3":null} {"id": 16} +16 ["plum", "grape", null] {"key2":8, "key3":null} {"id": 16} +16 \N {"key2":2} {"id": 16} +16 ["peach", "kiwi", null, "berry"] {"key2":20} {"id": 16} +17 ["apple", "kiwi", null, "papaya"] {"key7":null} {"id": 17} +17 \N {"key1":10, "key4":14} {"id": 17} +17 ["papaya"] {"key6":9, "key7":null} {"id": 17} +17 ["banana", "plum", null] {"key4":8} {"id": 17} +18 \N {"key2":null, "key5":10} {"id": 18} +18 ["apple", "mango", null] {"key2":2} {"id": 18} +18 \N {"key1":10, "key2":null} {"id": null} +18 ["apple", null] {"key1":11} {"id": 18} +19 ["banana", 
"mango", "cherry"] {"key3":7, "key4":null} {"id": 19} +19 ["cherry", null, "plum"] {"key1":null, "key7":6} {"id": 19} +19 ["pear", "grape"] {"key1":1, "key2":2, "key3":3} {"id": null} +19 ["kiwi", "mango", null] {"key7":9} {"id": 19} +20 ["papaya", "orange", "kiwi", null] {"key5":3, "key7":null} {"id": null} +20 ["kiwi", "plum", "orange", null] {"key1":14} {"id": null} +20 ["kiwi", null] {"key1":1, "key9":6} {"id": 20} +20 ["grape", null] {"key1":null, "key3":6} {"id": 20} +21 ["kiwi", null] {"key1":10, "key6":2} {"id": 21} +22 ["orange", "peach", null, "kiwi"] {"key3":null} {"id": 22} +23 ["berry", "grape", null] {"key1":8} {"id": 23} +24 \N {"key2":15, "key4":null} {"id": 24} +25 ["mango", "plum", "apple", null] {"key7":18} {"id": 25} +26 ["banana", null] {"key3":12} {"id": 26} +27 ["orange", "kiwi", "plum", null] {"key5":10} {"id": 27} +28 \N {"key1":14} {"id": 28} +29 ["apple", null, "grape", "peach"] {"key2":4, "key4":null} {"id": 29} +30 ["kiwi", "banana", null] {"key6":6} {"id": 30} +31 ["cherry", "berry", null, "plum"] {"key3":null} {"id": 31} +32 \N {"key2":9, "key7":null} {"id": 32} +33 ["apple", null, "kiwi", "orange"] {"key1":7} {"id": 33} +34 ["grape", "plum", null] {"key4":20} {"id": 34} +35 ["banana", null] {"key1":12, "key5":null} {"id": 35} +36 ["kiwi", "orange", "plum", null] {"key3":11} {"id": 36} +37 \N {"key1":null} {"id": 37} +38 ["apple", null] {"key2":3, "key6":9} {"id": 38} +39 ["plum", "grape", null] {"key5":8} {"id": 39} +40 ["banana", "kiwi", "peach", null] {"key1":15} {"id": 40} +41 ["grape", null, "plum"] {"key3":7} {"id": 41} +42 ["orange", "kiwi", "peach", null] {"key4":5} {"id": 42} +43 \N {"key1":2, "key7":null} {"id": 43} +44 ["apple", "banana", null] {"key2":14} {"id": 44} +45 ["grape", null] {"key4":12} {"id": 45} +46 ["plum", "kiwi", null, "orange"] {"key6":10} {"id": 46} +47 \N {"key2":null} {"id": 47} +48 ["mango", null] {"key5":9} {"id": 48} +49 ["kiwi", "plum", "banana", null] {"key1":13} {"id": 49} +50 \N {"key7":8} {"id": 50} diff --git a/regression-test/data/query_p0/sql_functions/array_functions/test_array_functions_by_literal.out b/regression-test/data/query_p0/sql_functions/array_functions/test_array_functions_by_literal.out index bddcebea7003a4..221d38b267a9d2 100644 --- a/regression-test/data/query_p0/sql_functions/array_functions/test_array_functions_by_literal.out +++ b/regression-test/data/query_p0/sql_functions/array_functions/test_array_functions_by_literal.out @@ -744,10 +744,10 @@ _ ["2022-01-03 00:00:00", "2021-01-01 00:00:00"] -- !sql -- -[25.990] +[25.990000000] -- !sql -- -[24.990, 25.990] +[24.990000000, 25.990000000] -- !sql -- [1, 2, 3, 2, 3, 4, 8, 1, 2, 9] @@ -774,28 +774,28 @@ _ ["2023-03-05 12:23:24.999", "2023-03-05 15:23:23.997"] -- !sql -- -[{"col":"a", "col":"d"}, {"col":"b", "col":"e"}, {"col":"c", "col":"f"}] +[{"1":"a", "2":"d"}, {"1":"b", "2":"e"}, {"1":"c", "2":"f"}] -- !sql -- -[{"col":"a", "col":"d", "col":"g"}, {"col":"b", "col":"e", "col":"h"}, {"col":"c", "col":"f", "col":"i"}] +[{"1":"a", "2":"d", "3":"g"}, {"1":"b", "2":"e", "3":"h"}, {"1":"c", "2":"f", "3":"i"}] -- !sql -- -[{"col":1, "col":"d"}, {"col":2, "col":"o"}, {"col":3, "col":"r"}, {"col":4, "col":"i"}, {"col":5, "col":"s"}] +[{"1":1, "2":"d"}, {"1":2, "2":"o"}, {"1":3, "2":"r"}, {"1":4, "2":"i"}, {"1":5, "2":"s"}] -- !sql -- -[{"col":1.1, "col":1}, {"col":2.2, "col":2}, {"col":3.3, "col":3}] +[{"1":1.1, "2":1}, {"1":2.2, "2":2}, {"1":3.3, "2":3}] -- !sql -- -[{"col":1, "col":null}, {"col":null, "col":"b"}, {"col":3, "col":null}] +[{"1":1, 
"2":null}, {"1":null, "2":"b"}, {"1":3, "2":null}] -- !sql -- -[{"col":3.050, "col":3.140}, {"col":2.220, "col":6.660}] +[{"1":3.050, "2":3.140}, {"1":2.220, "2":6.660}] -- !sql -- -[{"col":"2000-03-05", "col":"2000-02-02"}, {"col":"2023-03-10", "col":"2023-03-10"}] +[{"1":"2000-03-05", "2":"2000-02-02"}, {"1":"2023-03-10", "2":"2023-03-10"}] -- !sql -- -[{"col":"2023-03-05 12:23:24.999"}, {"col":"2023-03-05 15:23:23.997"}] +[{"1":"2023-03-05 12:23:24.999"}, {"1":"2023-03-05 15:23:23.997"}] -- !sql -- \N diff --git a/regression-test/data/query_p0/sql_functions/cast_function/test_cast_map_function.out b/regression-test/data/query_p0/sql_functions/cast_function/test_cast_map_function.out index dead718723c4bb..2b4e1abd302ce4 100644 --- a/regression-test/data/query_p0/sql_functions/cast_function/test_cast_map_function.out +++ b/regression-test/data/query_p0/sql_functions/cast_function/test_cast_map_function.out @@ -39,8 +39,8 @@ {null:"12", 123:"7777"} -- !sql9 -- -{null:1, null:2, 1234567:77} -{null:12, 123:7777} +{null:1.000000000, null:2.000000000, 1234567:77.000000000} +{null:12.000000000, 123:7777.000000000} -- !sql10 -- {null:null, null:null, 1234567:null} diff --git a/regression-test/data/query_p0/sql_functions/datetime_functions/test_from_iso8601_date.out b/regression-test/data/query_p0/sql_functions/datetime_functions/test_from_iso8601_date.out new file mode 100644 index 00000000000000..43a4f0bd496c38 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/datetime_functions/test_from_iso8601_date.out @@ -0,0 +1,244 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !test_31 -- +0000-01-01 + +-- !test_32 -- +0001-01-01 + +-- !test_33 -- +1900-01-01 + +-- !test_34 -- +1970-01-01 + +-- !test_35 -- +9999-01-01 + +-- !test_36 -- +0000-01-01 + +-- !test_37 -- +0000-02-28 + +-- !test_38 -- +0001-02-28 + +-- !test_39 -- +1900-02-28 + +-- !test_40 -- +1970-01-01 + +-- !test_41 -- +9999-12-31 + +-- !test_42 -- +0000-02-28 + +-- !test_43 -- +0001-02-28 + +-- !test_44 -- +1900-02-28 + +-- !test_45 -- +1970-01-01 + +-- !test_46 -- +9999-12-31 + +-- !test_47 -- +0000-01-01 + +-- !test_48 -- +0000-02-01 + +-- !test_49 -- +0001-03-01 + +-- !test_50 -- +1900-03-01 + +-- !test_51 -- +1970-01-01 + +-- !test_52 -- +9999-12-01 + +-- !test_53 -- +0000-01-03 + +-- !test_54 -- +0000-02-28 + +-- !test_55 -- +0001-02-26 + +-- !test_56 -- +1900-02-19 + +-- !test_57 -- +1969-12-29 + +-- !test_58 -- +9999-12-27 + +-- !test_59 -- +0000-01-03 + +-- !test_60 -- +0000-03-05 + +-- !test_61 -- +0001-03-03 + +-- !test_62 -- +1900-02-25 + +-- !test_63 -- +1969-12-29 + +-- !test_64 -- +9999-12-31 + +-- !test_65 -- +0000-02-28 + +-- !test_66 -- +0001-03-01 + +-- !test_67 -- +1900-02-28 + +-- !test_68 -- +1970-01-01 + +-- !test_69 -- +9999-12-31 + +-- !test_70 -- +0000-03-01 + +-- !test_71 -- +0000-03-02 + +-- !test_72 -- +0000-03-03 + +-- !test_73 -- +\N + +-- !test_74 -- +0000-03-01 + +-- !test_75 -- +\N + +-- !test_76 -- +0001-03-01 + +-- !test_77 -- +\N + +-- !test_78 -- +1900-03-01 + +-- !test_79 -- +1970-02-28 + +-- !test_80 -- +1970-03-01 + +-- !test_81 -- +\N + +-- !test_82 -- +9999-03-01 + +-- !test_83 -- +2008-12-29 + +-- !test_84 -- +2010-01-03 + +-- !test_85 -- +\N + +-- !test_86 -- +2023-04-05 + +-- !test_101 -- +\N + +-- !test_102 -- +0230-01-01 + +-- !test_103 -- +\N + +-- !test_104 -- +\N + +-- !test_105 -- +\N + +-- !test_106 -- +\N + +-- !test_107 -- +\N + +-- !test_108 -- +\N + +-- !test_109 -- +\N + +-- !test_110 -- +\N + +-- !test_111 -- +\N + 
+-- !test_112 -- +\N + +-- !test_113 -- +\N + +-- !test_114 -- +\N + +-- !test_115 -- +5555-01-01 + +-- !test_116 -- +\N + +-- !test_116 -- +5555-01-01 + +-- !test_117 -- +\N + +-- !test_118 -- +\N + +-- !test_119 -- +\N + +-- !test_120 -- +\N + +-- !test_87 -- +2023-02-03 2023-02-03 2023-02-03 2023-02-03 2023-02-03 2023-02-03 +\N \N \N 2023-02-03 2023-02-03 2023-02-03 + +-- !test_88 -- +2023-02-03 2023-02-03 2023-02-03 2023-02-03 2023-02-03 2023-02-03 +\N \N \N 2023-02-03 2023-02-03 2023-02-03 + +-- !test_89 -- +\N +\N + diff --git a/regression-test/data/query_p0/sql_functions/datetime_functions/test_to_iso8601.out b/regression-test/data/query_p0/sql_functions/datetime_functions/test_to_iso8601.out new file mode 100644 index 00000000000000..358249891a132b --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/datetime_functions/test_to_iso8601.out @@ -0,0 +1,194 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !test_1 -- +2023-04-05 +2023-04-05 +2023-04-05 +0000-01-03 +9999-12-31 +\N + +-- !test_2 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.000000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.000000 +\N + +-- !test_3 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.100000 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.900000 +\N + +-- !test_4 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.120000 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.990000 +\N + +-- !test_5 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123000 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999000 +\N + +-- !test_6 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123400 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999900 +\N + +-- !test_7 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123456 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999999 +\N + +-- !test_8 -- +2023-04-05 +2023-04-05 +2023-04-05 +0000-01-03 +9999-12-31 +9999-12-31 + +-- !test_9 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.000000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.000000 +9999-12-31T23:59:59.000000 + +-- !test_10 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.100000 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.900000 +9999-12-31T23:59:59.900000 + +-- !test_11 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.120000 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.990000 +9999-12-31T23:59:59.990000 + +-- !test_12 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123000 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999000 +9999-12-31T23:59:59.999000 + +-- !test_13 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123400 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999900 +9999-12-31T23:59:59.999900 + +-- !test_14 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123456 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999999 +9999-12-31T23:59:59.999999 + +-- !test_7_2 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123456 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 +9999-12-31T23:59:59.999999 +\N + +-- !test_14_2 -- +2023-04-05T03:04:05.000000 +2023-04-05T03:04:05.123456 +2023-04-05T03:04:05.100000 +0000-01-03T00:00:00.000000 
+9999-12-31T23:59:59.999999 +9999-12-31T23:59:59.999999 + +-- !test_14_2 -- +\N +\N +\N +\N +\N +\N + +-- !test_15 -- +2023-01-03 + +-- !test_16 -- +2023-01-03T00:00:00.000000 + +-- !test_17 -- +0000-01-03 + +-- !test_18 -- +0000-01-03T00:00:00.000000 + +-- !test_19 -- +0000-12-31 + +-- !test_20 -- +0000-12-31T23:59:59.000000 + +-- !test_21 -- +0000-02-28 + +-- !test_22 -- +0000-02-28T00:00:00.000000 + +-- !test_23 -- +\N + +-- !test_24 -- +\N + +-- !test_25 -- +1900-02-28 + +-- !test_26 -- +1900-02-28T00:00:00.000000 + +-- !test_27 -- +9999-12-31 + +-- !test_28 -- +9999-12-31T23:59:59.000000 + +-- !test_29 -- +1970-01-01 + +-- !test_30 -- +1970-01-01T00:00:00.000000 + +-- !test_31 -- +1970-01-01 + +-- !test_32 -- +\N + +-- !test_33 -- +\N + diff --git a/regression-test/data/query_p0/sql_functions/encryption_digest/test_encryption_function.out b/regression-test/data/query_p0/sql_functions/encryption_digest/test_encryption_function.out index c652b3074558cc..3b3f06d98d8ccc 100644 --- a/regression-test/data/query_p0/sql_functions/encryption_digest/test_encryption_function.out +++ b/regression-test/data/query_p0/sql_functions/encryption_digest/test_encryption_function.out @@ -272,3 +272,6 @@ zhang -- !sql56 -- zhang +-- !sql57 -- +\N + diff --git a/regression-test/data/query_p0/sql_functions/json_functions/json_search.out b/regression-test/data/query_p0/sql_functions/json_functions/json_search.out new file mode 100644 index 00000000000000..d5ecb9cd3b00bf --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/json_functions/json_search.out @@ -0,0 +1,139 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !one_is_valid_or_null -- +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" "$[0]" +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +7 \N one _% \N \N +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all \N \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all X \N \N + +-- !all_const1 -- +"$[2].C" + +-- !all_const1 -- +"$[2].C" + +-- !all_const2 -- +["$[3].D","$[2].C"] + +-- !all_const2 -- +["$[3].D","$[2].C"] + +-- !all_const3 -- +"$[0]" + +-- !all_const4 -- +"$[0]" + +-- !all_const5 -- +"$[0]" + +-- !all_const6 -- +"$[2].C" + +-- !all_const7 -- +\N + +-- !all_const8 -- +\N + +-- !one_is_one_const -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" "$[0]" +7 \N one _% \N \N +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one \N \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one X \N \N + +-- !one_is_all_const -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% 
["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] ["$[3].D","$[2].C","$[1][0].B","$[0]"] +7 \N all _% \N \N +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all \N \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all X \N \N + +-- !one_and_pattern_is_const1 -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +7 \N one A \N \N +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one A "$[0]" "$[0]" + +-- !one_and_pattern_is_const2 -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +7 \N all A \N \N +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all A "$[0]" "$[0]" + +-- !one_and_pattern_is_nullconst -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +7 \N \N \N \N \N +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] \N \N \N \N + +-- !json_const1 -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +7 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one _% "$[0]" +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] one X \N + +-- !json_const2 -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +7 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all _% ["$[3].D","$[2].C","$[1][0].B","$[0]"] +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] all X \N + +-- !one_case1 -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +4 
["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +7 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One _% "$[0]" +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] One X \N + +-- !one_case2 -- +1 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +2 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +3 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +4 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +5 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +6 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +7 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All _% "$[0]" +8 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All \N \N +9 ["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}] All X \N + diff --git a/regression-test/data/query_p0/sql_functions/math_functions/test_normal_cdf.out b/regression-test/data/query_p0/sql_functions/math_functions/test_normal_cdf.out new file mode 100644 index 00000000000000..7b500151a9ac79 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/math_functions/test_normal_cdf.out @@ -0,0 +1,201 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !test_1 -- +0.9750021048517796 0.9750021048517796 +0.5 0.5 +0.0013498980316301035 0.0013498980316301035 +\N \N +\N \N +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 + +-- !test_2 -- +0.9750021048517796 0.9750021048517796 +0.185839346177947 0.185839346177947 +0.9502845653746862 0.9502845653746862 +\N \N +\N \N +\N 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 + +-- !test_3 -- +0.9750021048517796 0.9750021048517796 +0.8667397370974945 0.8667397370974945 +1.0188922978077164E-4 1.0188922978077164E-4 +\N \N +\N \N +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 + +-- !test_4 -- +0.9750021048517796 0.9750021048517796 +0.5 0.5 +1.488228429380456E-10 1.488228429380456E-10 +0.8413447460685429 0.8413447460685429 +0.8413447460685429 0.8413447460685429 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 + +-- !test_5 -- +0.9750021048517796 0.9750021048517796 +1.0 1.0 +3.1086244689504383E-15 3.1086244689504383E-15 +0.8413447460685429 0.8413447460685429 +0.8413447460685429 0.8413447460685429 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 0.9750021048517796 + +-- !test_6 -- +0.9750021048517796 0.9750021048517796 +4.440892098500626E-16 4.440892098500626E-16 +0.9997299123060366 0.9997299123060366 +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +0.9750021048517796 
0.9750021048517796 +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 +\N 0.9750021048517796 + +-- !test_7 -- +0.9750021048517796 0.9750021048517796 +0.5861988701119502 0.5861988701119502 +0.8246760551477705 0.8246760551477705 +\N \N +\N \N +\N \N +\N \N +0.9750021048517796 0.9750021048517796 +\N \N +\N \N +\N \N +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 +0.9750021048517796 0.9750021048517796 + +-- !test_8 -- +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N + +-- !test_9 -- +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N + +-- !test_10 -- +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N + +-- !test_11 -- +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N +\N \N + +-- !test_12 -- +1 0.0 1.0 0.9750021048517796 0.9750021048517796 0.9750021048517796 + +-- !test_13 -- +0.9750021048517796 + +-- !test_14 -- +0.9750021048517796 + +-- !test_15 -- +0.9750021048517796 + +-- !test_16 -- +\N + +-- !test_17 -- +\N + +-- !test_18 -- +\N + +-- !test_19 -- +\N + diff --git a/regression-test/data/query_p0/sql_functions/string_functions/test_split_by_string.out b/regression-test/data/query_p0/sql_functions/string_functions/test_split_by_string.out index c46fa2bd27e2cc..0a335392197c66 100644 --- a/regression-test/data/query_p0/sql_functions/string_functions/test_split_by_string.out +++ b/regression-test/data/query_p0/sql_functions/string_functions/test_split_by_string.out @@ -2,6 +2,9 @@ -- !sql -- ["a", "b", "c", "d", "e"] +-- !sql -- +["你", "a", "好", "b", "世", "c", "界"] + -- !sql -- ["1", "2", "5", "5", "3"] @@ -74,6 +77,7 @@ 9 a,b,c, , ["a", "b", "c", ""] 10 \N , \N 11 a,b,c,12345, , ["a", "b", "c", "12345", ""] +12 你a好b世c界 ["你", "a", "好", "b", "世", "c", "界"] -- !sql -- 1 1,,2,3,,4,5,,abcde ,, ["1", "2,3", "4,5", "abcde"] diff --git a/regression-test/data/query_p0/sql_functions/string_functions/test_string_function.out b/regression-test/data/query_p0/sql_functions/string_functions/test_string_function.out index dfcf50a244b48a..cadf5039794dd8 100644 Binary files a/regression-test/data/query_p0/sql_functions/string_functions/test_string_function.out and b/regression-test/data/query_p0/sql_functions/string_functions/test_string_function.out differ diff --git a/regression-test/data/query_p0/sql_functions/string_functions/test_translate.out b/regression-test/data/query_p0/sql_functions/string_functions/test_translate.out new file mode 100644 index 00000000000000..951d1012c3b823 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/string_functions/test_translate.out @@ -0,0 +1,493 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !empty_nullable -- + +-- !empty_not_nullable -- + +-- !empty_partial_nullable -- + +-- !nullable -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +!!! +!@ +!@!@!$!^ +!@#!@# +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +111 +12 +1211131 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123233333 +\\\\\\ +\\a +\\a\\\\a\\ +\\a\\a\\c\\dccd +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +中中中 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文中中中 +中文中文 +中文文 + +-- !not_nullable -- + + + +!@#@#$#^$%%$^ +123123 +\\a\\b\\c\\d +中文 + +-- !partial_nullable -- +\N + + +!@#@#$#^$%%$^ +123123 +\\a\\b\\c\\d +中文 + +-- !nullable_no_null -- + + + +!@#@#$#^$%%$^ +123123 +\\a\\b\\c\\d +中文 + +-- !const_nullable -- +\N +\N +\N +\N +\N +\N +\N + +-- !partial_const_nullable -- +\N +\N +\N +\N +\N +\N +\N + +-- !const_not_nullable -- +a +a +a +a +a +a +a + +-- !const_other_nullable -- +\N +x +x +x +x +x +x + +-- !const_other_not_nullable -- + + + +! 
+1 +\\ +中 + +-- !const_nullable_no_null -- +abc + +-- !const_partial_nullable_no_null -- +xyz + +-- !const1 -- +\N +xyz +xyz +xyz +xyz +xyz +xyz + +-- !const12 -- +\N +xyz +xyz +xyz +xyz +xyz +xyz + +-- !const23 -- + + + +!@#@#$#^$%%$^ +123123 +\\a\\b\\c\\d +中文 + +-- !const3 -- +\N + +aaa +ab +abaaa +abcabc +abcbcc + +-- !1 -- +abcd + +-- !2 -- +zbcd + +-- !3 -- +zbcdz + +-- !4 -- +zbd + +-- !5 -- +zbxd + +-- !6 -- +中bxd + +-- !7 -- +文文 + +-- !8 -- +a文 + +-- !9 -- +tttttt + diff --git a/regression-test/data/query_p0/sql_functions/string_functions/test_url_decode.out b/regression-test/data/query_p0/sql_functions/string_functions/test_url_decode.out new file mode 100644 index 00000000000000..7199df82b4b422 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/string_functions/test_url_decode.out @@ -0,0 +1,43 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !empty_nullable -- + +-- !empty_not_nullable -- + +-- !nullable -- +\N + +/home/doris/directory/ +1234567890 +ABCDEFGHIJKLMNOPQRSTUWXYZ +~!@#%^&*()<>?,./:{}|[]\\_+-= + +-- !not_nullable -- + + +/home/doris/directory/ +1234567890 +ABCDEFGHIJKLMNOPQRSTUWXYZ +~!@#%^&*()<>?,./:{}|[]\\_+-= + +-- !nullable_no_null -- + + +/home/doris/directory/ +1234567890 +ABCDEFGHIJKLMNOPQRSTUWXYZ +~!@#%^&*()<>?,./:{}|[]\\_+-= + +-- !const_nullable -- + + + + + + + +-- !const_not_nullable -- +/home/doris/directory/ + +-- !const_nullable_no_null -- +/home/doris/directory/ + diff --git a/regression-test/data/query_p0/sql_functions/string_functions/test_url_encode.out b/regression-test/data/query_p0/sql_functions/string_functions/test_url_encode.out new file mode 100644 index 00000000000000..23b82546e3a85d --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/string_functions/test_url_encode.out @@ -0,0 +1,43 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !empty_nullable -- + +-- !empty_not_nullable -- + +-- !nullable -- +\N + +%2Fhome%2Fdoris%2Fdirectory%2F +%7E%21%40%23%25%5E%26*%28%29%3C%3E%3F%2C.%2F%3A%7B%7D%7C%5B%5D%5C_%2B-%3D +1234567890 +ABCDEFGHIJKLMNOPQRSTUWXYZ + +-- !not_nullable -- + + +%2Fhome%2Fdoris%2Fdirectory%2F +%7E%21%40%23%25%5E%26*%28%29%3C%3E%3F%2C.%2F%3A%7B%7D%7C%5B%5D%5C_%2B-%3D +1234567890 +ABCDEFGHIJKLMNOPQRSTUWXYZ + +-- !nullable_no_null -- + + +%2Fhome%2Fdoris%2Fdirectory%2F +%7E%21%40%23%25%5E%26*%28%29%3C%3E%3F%2C.%2F%3A%7B%7D%7C%5B%5D%5C_%2B-%3D +1234567890 +ABCDEFGHIJKLMNOPQRSTUWXYZ + +-- !const_nullable -- + + + + + + + +-- !const_not_nullable -- +%2Fhome%2Fdoris%2Fdirectory%2F + +-- !const_nullable_no_null -- +%2Fhome%2Fdoris%2Fdirectory%2F + diff --git a/regression-test/data/query_p0/sql_functions/test_template_one_arg.out b/regression-test/data/query_p0/sql_functions/test_template_one_arg.out new file mode 100644 index 00000000000000..1648b8ad2b0d36 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/test_template_one_arg.out @@ -0,0 +1,83 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !empty_nullable -- + +-- !empty_not_nullable -- + +-- !all_null -- +\N +\N +\N + +-- !nullable -- +\N +\N +\N +\N +-0.1001674211615598 +-1.5707963267948966 +0.0 +0.0 +0.1001674211615598 +1.0E-100 +1.0E-15 +1.5707963267948966 + +-- !not_nullable -- +\N +\N +\N +-0.1001674211615598 +-1.5707963267948966 +0.0 +0.0 +0.0 +0.1001674211615598 +1.0E-100 +1.0E-15 +1.5707963267948966 + +-- !nullable_no_null -- +\N +\N +\N +-0.1001674211615598 +-1.5707963267948966 +0.0 +0.0 +0.0 +0.1001674211615598 +1.0E-100 +1.0E-15 +1.5707963267948966 + +-- !const_nullable -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N + +-- !const_not_nullable -- +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 +0.5235987755982989 + +-- !const_nullable_no_null -- +0.5235987755982989 + diff --git a/regression-test/data/query_p0/sql_functions/test_template_three_args.out b/regression-test/data/query_p0/sql_functions/test_template_three_args.out new file mode 100644 index 00000000000000..b85ce8c74e111f --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/test_template_three_args.out @@ -0,0 +1,471 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !empty_nullable -- + +-- !empty_not_nullable -- + +-- !empty_partial_nullable -- + +-- !all_null -- +\N +\N +\N + +-- !nullable -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N + + + + + + + + + !@#@#$#^$%%$^ + 123123 + \\a\\b\\c\\d + 中文 + !@#@#$#^$%%$^ + !@#@#$#^$%%$^ + !@#@#$#^$%%$^ + !@#@#$#^$%%$^ + !@#@#$#^$%%$^!@#@#$#^$%%$^ + !@#@#$#^$%%$^123123 + !@#@#$#^$%%$^\\a\\b\\c\\d + !@#@#$#^$%%$^中文 + 123123 + 123123 + 123123 + 123123 + 123123!@#@#$#^$%%$^ + 123123123123 + 123123\\a\\b\\c\\d + 123123中文 + \\a\\b\\c\\d + \\a\\b\\c\\d + \\a\\b\\c\\d + \\a\\b\\c\\d + \\a\\b\\c\\d!@#@#$#^$%%$^ + \\a\\b\\c\\d123123 + \\a\\b\\c\\d\\a\\b\\c\\d + \\a\\b\\c\\d中文 + 中文 + 中文 + 中文 + 中文 + 中文!@#@#$#^$%%$^ + 中文123123 + 中文\\a\\b\\c\\d + 中文中文 +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ +!@#@#$#^$%%$^ !@#@#$#^$%%$^ +!@#@#$#^$%%$^ 123123 +!@#@#$#^$%%$^ \\a\\b\\c\\d +!@#@#$#^$%%$^ 中文 +!@#@#$#^$%%$^!@#@#$#^$%%$^ +!@#@#$#^$%%$^!@#@#$#^$%%$^ +!@#@#$#^$%%$^!@#@#$#^$%%$^ +!@#@#$#^$%%$^!@#@#$#^$%%$^ +!@#@#$#^$%%$^!@#@#$#^$%%$^!@#@#$#^$%%$^ +!@#@#$#^$%%$^!@#@#$#^$%%$^123123 +!@#@#$#^$%%$^!@#@#$#^$%%$^\\a\\b\\c\\d +!@#@#$#^$%%$^!@#@#$#^$%%$^中文 +!@#@#$#^$%%$^123123 +!@#@#$#^$%%$^123123 +!@#@#$#^$%%$^123123 +!@#@#$#^$%%$^123123 +!@#@#$#^$%%$^123123!@#@#$#^$%%$^ +!@#@#$#^$%%$^123123123123 +!@#@#$#^$%%$^123123\\a\\b\\c\\d +!@#@#$#^$%%$^123123中文 +!@#@#$#^$%%$^\\a\\b\\c\\d +!@#@#$#^$%%$^\\a\\b\\c\\d +!@#@#$#^$%%$^\\a\\b\\c\\d +!@#@#$#^$%%$^\\a\\b\\c\\d +!@#@#$#^$%%$^\\a\\b\\c\\d!@#@#$#^$%%$^ +!@#@#$#^$%%$^\\a\\b\\c\\d123123 +!@#@#$#^$%%$^\\a\\b\\c\\d\\a\\b\\c\\d +!@#@#$#^$%%$^\\a\\b\\c\\d中文 +!@#@#$#^$%%$^中文 +!@#@#$#^$%%$^中文 
+!@#@#$#^$%%$^中文 +!@#@#$#^$%%$^中文 +!@#@#$#^$%%$^中文!@#@#$#^$%%$^ +!@#@#$#^$%%$^中文123123 +!@#@#$#^$%%$^中文\\a\\b\\c\\d +!@#@#$#^$%%$^中文中文 +123123 +123123 +123123 +123123 +123123 +123123 +123123 +123123 !@#@#$#^$%%$^ +123123 123123 +123123 \\a\\b\\c\\d +123123 中文 +123123!@#@#$#^$%%$^ +123123!@#@#$#^$%%$^ +123123!@#@#$#^$%%$^ +123123!@#@#$#^$%%$^ +123123!@#@#$#^$%%$^!@#@#$#^$%%$^ +123123!@#@#$#^$%%$^123123 +123123!@#@#$#^$%%$^\\a\\b\\c\\d +123123!@#@#$#^$%%$^中文 +123123123123 +123123123123 +123123123123 +123123123123 +123123123123!@#@#$#^$%%$^ +123123123123123123 +123123123123\\a\\b\\c\\d +123123123123中文 +123123\\a\\b\\c\\d +123123\\a\\b\\c\\d +123123\\a\\b\\c\\d +123123\\a\\b\\c\\d +123123\\a\\b\\c\\d!@#@#$#^$%%$^ +123123\\a\\b\\c\\d123123 +123123\\a\\b\\c\\d\\a\\b\\c\\d +123123\\a\\b\\c\\d中文 +123123中文 +123123中文 +123123中文 +123123中文 +123123中文!@#@#$#^$%%$^ +123123中文123123 +123123中文\\a\\b\\c\\d +123123中文中文 +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d +\\a\\b\\c\\d !@#@#$#^$%%$^ +\\a\\b\\c\\d 123123 +\\a\\b\\c\\d \\a\\b\\c\\d +\\a\\b\\c\\d 中文 +\\a\\b\\c\\d!@#@#$#^$%%$^ +\\a\\b\\c\\d!@#@#$#^$%%$^ +\\a\\b\\c\\d!@#@#$#^$%%$^ +\\a\\b\\c\\d!@#@#$#^$%%$^ +\\a\\b\\c\\d!@#@#$#^$%%$^!@#@#$#^$%%$^ +\\a\\b\\c\\d!@#@#$#^$%%$^123123 +\\a\\b\\c\\d!@#@#$#^$%%$^\\a\\b\\c\\d +\\a\\b\\c\\d!@#@#$#^$%%$^中文 +\\a\\b\\c\\d123123 +\\a\\b\\c\\d123123 +\\a\\b\\c\\d123123 +\\a\\b\\c\\d123123 +\\a\\b\\c\\d123123!@#@#$#^$%%$^ +\\a\\b\\c\\d123123123123 +\\a\\b\\c\\d123123\\a\\b\\c\\d +\\a\\b\\c\\d123123中文 +\\a\\b\\c\\d\\a\\b\\c\\d +\\a\\b\\c\\d\\a\\b\\c\\d +\\a\\b\\c\\d\\a\\b\\c\\d +\\a\\b\\c\\d\\a\\b\\c\\d +\\a\\b\\c\\d\\a\\b\\c\\d!@#@#$#^$%%$^ +\\a\\b\\c\\d\\a\\b\\c\\d123123 +\\a\\b\\c\\d\\a\\b\\c\\d\\a\\b\\c\\d +\\a\\b\\c\\d\\a\\b\\c\\d中文 +\\a\\b\\c\\d中文 +\\a\\b\\c\\d中文 +\\a\\b\\c\\d中文 +\\a\\b\\c\\d中文 +\\a\\b\\c\\d中文!@#@#$#^$%%$^ +\\a\\b\\c\\d中文123123 +\\a\\b\\c\\d中文\\a\\b\\c\\d +\\a\\b\\c\\d中文中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 +中文 !@#@#$#^$%%$^ +中文 123123 +中文 \\a\\b\\c\\d +中文 中文 +中文!@#@#$#^$%%$^ +中文!@#@#$#^$%%$^ +中文!@#@#$#^$%%$^ +中文!@#@#$#^$%%$^ +中文!@#@#$#^$%%$^!@#@#$#^$%%$^ +中文!@#@#$#^$%%$^123123 +中文!@#@#$#^$%%$^\\a\\b\\c\\d +中文!@#@#$#^$%%$^中文 +中文123123 +中文123123 +中文123123 +中文123123 +中文123123!@#@#$#^$%%$^ +中文123123123123 +中文123123\\a\\b\\c\\d +中文123123中文 +中文\\a\\b\\c\\d +中文\\a\\b\\c\\d +中文\\a\\b\\c\\d +中文\\a\\b\\c\\d +中文\\a\\b\\c\\d!@#@#$#^$%%$^ +中文\\a\\b\\c\\d123123 +中文\\a\\b\\c\\d\\a\\b\\c\\d +中文\\a\\b\\c\\d中文 +中文中文 +中文中文 +中文中文 +中文中文 +中文中文!@#@#$#^$%%$^ +中文中文123123 +中文中文\\a\\b\\c\\d +中文中文中文 + +-- !not_nullable -- + + + +!@#@#$#^$%%$^!@#@#$#^$%%$^!@#@#$#^$%%$^ +123123123123123123 +\\a\\b\\c\\d\\a\\b\\c\\d\\a\\b\\c\\d +中文中文中文 + +-- !partial_nullable -- +\N + + +!@#@#$#^$%%$^!@#@#$#^$%%$^!@#@#$#^$%%$^ +123123123123123123 +\\a\\b\\c\\d\\a\\b\\c\\d\\a\\b\\c\\d +中文中文中文 + +-- !nullable_no_null -- + + + +!@#@#$#^$%%$^!@#@#$#^$%%$^!@#@#$#^$%%$^ +123123123123123123 +\\a\\b\\c\\d\\a\\b\\c\\d\\a\\b\\c\\d +中文中文中文 + +-- !const_nullable -- +\N +\N +\N +\N +\N +\N +\N + +-- !partial_const_nullable -- +\N +\N +\N +\N +\N +\N +\N + +-- !const_not_nullable -- +abc +abc +abc +abc +abc +abc +abc + +-- !const_other_nullable -- +\N +x +x +x!@#@#$#^$%%$^!@#@#$#^$%%$^ +x123123123123 +x\\a\\b\\c\\d\\a\\b\\c\\d +x中文中文 + +-- !const_other_not_nullable -- +xx +xx +xx +xx!@#@#$#^$%%$^ +xx123123 +xx\\a\\b\\c\\d +xx中文 + +-- !const_nullable_no_null -- +abc中文xxx + +-- !const_partial_nullable_no_null -- +xyzaa + +-- !const1 -- +\N +xyz +xyz +xyz!@#@#$#^$%%$^!@#@#$#^$%%$^ +xyz123123123123 
+xyz\\a\\b\\c\\d\\a\\b\\c\\d +xyz中文中文 + +-- !const12 -- +\N +xyzabc +xyzabc +xyzabc!@#@#$#^$%%$^ +xyzabc123123 +xyzabc\\a\\b\\c\\d +xyzabc中文 + +-- !const23 -- + xyzabc +!@#@#$#^$%%$^xyzabc +123123xyzabc +\\a\\b\\c\\dxyzabc +xyzabc +xyzabc +中文xyzabc + +-- !const3 -- +\N + abc +!@#@#$#^$%%$^!@#@#$#^$%%$^abc +123123123123abc +\\a\\b\\c\\d\\a\\b\\c\\dabc +abc +中文中文abc + diff --git a/regression-test/data/query_p0/sql_functions/test_template_two_args.out b/regression-test/data/query_p0/sql_functions/test_template_two_args.out new file mode 100644 index 00000000000000..e12f22f155c0c7 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/test_template_two_args.out @@ -0,0 +1,279 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !empty_nullable -- + +-- !empty_not_nullable -- + +-- !empty_partial_nullable -- + +-- !all_null -- +\N +\N +\N + +-- !nullable -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +-0.008129902179943912 +-0.09966865249116204 +-0.7853981633974483 +-0.7853981633974483 +-0.7853981633974483 +-1.0E-100 +-1.0E-101 +-1.4711276743037345 +-1.5707963267948866 +-1.5707963267948957 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.5707963267948966 +-1.6704649792860586 +-2.356194490192345 +-2.356194490192345 +-2.356194490192345 +-3.0419240010986313 +-3.141592653589793 +-3.141592653589793 +-8.130079509533991E-4 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.0 +0.008129902179943912 +0.09966865249116204 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +1.0E-100 +1.0E-100 +1.0E-101 +1.0E-115 +1.0E-14 +1.0E-15 +1.0E-200 +1.0E-85 +1.0E-99 +1.23E-98 +1.4711276743037345 +1.5626664246149526 +1.5699833188439432 +1.5707963267948866 +1.5707963267948957 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.57160933474585 +1.5789262289748405 +1.6704649792860586 +2.356194490192345 +2.356194490192345 +2.356194490192345 +3.0419240010986313 +3.1415926535897833 +3.1415926535897922 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +3.141592653589793 +8.130079509533991E-4 +8.130081300813008E-103 +8.130081300813008E-18 + +-- !not_nullable -- +-2.356194490192345 +-2.356194490192345 +-2.356194490192345 +0.0 +0.0 +0.0 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 + +-- !partial_nullable -- +\N +-2.356194490192345 +-2.356194490192345 +-2.356194490192345 +0.0 +0.0 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 + +-- 
!nullable_no_null -- +-2.356194490192345 +-2.356194490192345 +-2.356194490192345 +0.0 +0.0 +0.0 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 +0.7853981633974483 + +-- !const_nullable -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N + +-- !partial_const_nullable -- +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N + +-- !const_not_nullable -- +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 +0.0049999583339583225 + +-- !const_other_nullable -- +\N +0.08112239210090098 +1.0E-99 +1.4711276743037347 +1.5607966601082315 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5707963267948966 +1.5807959934815619 +1.6704649792860586 +3.141592653589793 + +-- !const_other_not_nullable -- +-0.00999966668666524 +-0.09966865249116202 +-1.5707963267948966 +0.0 +0.0 +0.0 +0.00999966668666524 +0.09966865249116202 +1.0000000000000001E-16 +1.0E-101 +1.4896739346939956 +1.5707963267948966 + +-- !const_nullable_no_null -- +1.5707963267948966 + +-- !const_nullable_no_null_multirows -- +1.5707963267948966 + +-- !const_partial_nullable_no_null -- +1.5707963267948966 + diff --git a/regression-test/data/query_p0/sql_functions/window_functions/test_partition_topn.out b/regression-test/data/query_p0/sql_functions/window_functions/test_partition_topn.out new file mode 100644 index 00000000000000..8d2ce970235fee --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/window_functions/test_partition_topn.out @@ -0,0 +1,5 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql_topn -- +1 1 +12 1 + diff --git a/regression-test/data/query_p0/system/test_partitions_schema.out b/regression-test/data/query_p0/system/test_partitions_schema.out index 781be7e41eefd1..a6c379ffc5bb46 100644 --- a/regression-test/data/query_p0/system/test_partitions_schema.out +++ b/regression-test/data/query_p0/system/test_partitions_schema.out @@ -9,35 +9,35 @@ test_range_table p4 3 test_range_table p5 0 -- !select_check_1 -- -internal test_partitions_schema_db duplicate_table duplicate_table NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 -internal test_partitions_schema_db listtable p1_city NULL 0 0 LIST NULL user_id, city NULL (("1", "Beijing"),("1", "Shanghai")) -1 0 0 0 0 0 0 -internal test_partitions_schema_db listtable p2_city NULL 0 0 LIST NULL user_id, city NULL (("2", "Beijing"),("2", "Shanghai")) -1 0 0 0 0 0 0 -internal test_partitions_schema_db listtable p3_city NULL 0 0 LIST NULL user_id, city NULL (("3", "Beijing"),("3", "Shanghai")) -1 0 0 0 0 0 0 -internal test_partitions_schema_db randomtable randomtable NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 -internal test_partitions_schema_db test_range_table p0 NULL 0 0 RANGE NULL col_1 NULL [('-2147483648'), ('4')) 9 636 5728 0 0 0 0 -internal test_partitions_schema_db test_range_table p1 NULL 0 0 RANGE NULL col_1 NULL [('4'), ('6')) 2 959 1919 0 0 0 0 -internal test_partitions_schema_db test_range_table p100 NULL 0 0 RANGE NULL col_1 NULL [('83647'), ('2147483647')) 4 735 2941 0 0 0 0 -internal test_partitions_schema_db test_range_table p2 NULL 0 0 RANGE NULL col_1 NULL [('6'), ('7')) 1 975 975 0 0 0 0 -internal test_partitions_schema_db test_range_table p3 NULL 0 0 RANGE NULL col_1 NULL [('7'), ('8')) 1 959 959 0 0 0 0 -internal 
test_partitions_schema_db test_range_table p4 NULL 0 0 RANGE NULL col_1 NULL [('8'), ('10')) 3 948 2846 0 0 0 0 -internal test_partitions_schema_db test_range_table p5 NULL 0 0 RANGE NULL col_1 NULL [('10'), ('83647')) 0 0 0 0 0 0 0 -internal test_partitions_schema_db test_row_column_page_size1 test_row_column_page_size1 NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 -internal test_partitions_schema_db test_row_column_page_size2 test_row_column_page_size2 NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 +internal test_partitions_schema_db duplicate_table duplicate_table NULL 0 0 UNPARTITIONED NULL NULL NULL NULL +internal test_partitions_schema_db listtable p1_city NULL 0 0 LIST NULL user_id, city NULL (("1", "Beijing"),("1", "Shanghai")) +internal test_partitions_schema_db listtable p2_city NULL 0 0 LIST NULL user_id, city NULL (("2", "Beijing"),("2", "Shanghai")) +internal test_partitions_schema_db listtable p3_city NULL 0 0 LIST NULL user_id, city NULL (("3", "Beijing"),("3", "Shanghai")) +internal test_partitions_schema_db randomtable randomtable NULL 0 0 UNPARTITIONED NULL NULL NULL NULL +internal test_partitions_schema_db test_range_table p0 NULL 0 0 RANGE NULL col_1 NULL [('-2147483648'), ('4')) +internal test_partitions_schema_db test_range_table p1 NULL 0 0 RANGE NULL col_1 NULL [('4'), ('6')) +internal test_partitions_schema_db test_range_table p100 NULL 0 0 RANGE NULL col_1 NULL [('83647'), ('2147483647')) +internal test_partitions_schema_db test_range_table p2 NULL 0 0 RANGE NULL col_1 NULL [('6'), ('7')) +internal test_partitions_schema_db test_range_table p3 NULL 0 0 RANGE NULL col_1 NULL [('7'), ('8')) +internal test_partitions_schema_db test_range_table p4 NULL 0 0 RANGE NULL col_1 NULL [('8'), ('10')) +internal test_partitions_schema_db test_range_table p5 NULL 0 0 RANGE NULL col_1 NULL [('10'), ('83647')) +internal test_partitions_schema_db test_row_column_page_size1 test_row_column_page_size1 NULL 0 0 UNPARTITIONED NULL NULL NULL NULL +internal test_partitions_schema_db test_row_column_page_size2 test_row_column_page_size2 NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -- !select_check_2 -- -internal test_partitions_schema_db duplicate_table duplicate_table NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 -internal test_partitions_schema_db listtable p1_city NULL 0 0 LIST NULL user_id, city NULL (("1", "Beijing"),("1", "Shanghai")) -1 0 0 0 0 0 0 -internal test_partitions_schema_db listtable p2_city NULL 0 0 LIST NULL user_id, city NULL (("2", "Beijing"),("2", "Shanghai")) -1 0 0 0 0 0 0 -internal test_partitions_schema_db listtable p3_city NULL 0 0 LIST NULL user_id, city NULL (("3", "Beijing"),("3", "Shanghai")) -1 0 0 0 0 0 0 -internal test_partitions_schema_db randomtable randomtable NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 -internal test_partitions_schema_db test_range_table p0 NULL 0 0 RANGE NULL col_1 NULL [('-2147483648'), ('4')) 9 636 5728 0 0 0 0 -internal test_partitions_schema_db test_range_table p1 NULL 0 0 RANGE NULL col_1 NULL [('4'), ('6')) 2 959 1919 0 0 0 0 -internal test_partitions_schema_db test_range_table p100 NULL 0 0 RANGE NULL col_1 NULL [('83647'), ('2147483647')) 4 735 2941 0 0 0 0 -internal test_partitions_schema_db test_range_table p2 NULL 0 0 RANGE NULL col_1 NULL [('6'), ('7')) 1 975 975 0 0 0 0 -internal test_partitions_schema_db test_range_table p3 NULL 0 0 RANGE NULL col_1 NULL [('7'), ('8')) 1 959 959 0 0 0 0 -internal test_partitions_schema_db test_range_table p4 NULL 0 0 RANGE NULL col_1 NULL 
[('8'), ('10')) 3 948 2846 0 0 0 0 -internal test_partitions_schema_db test_range_table p5 NULL 0 0 RANGE NULL col_1 NULL [('10'), ('83647')) 0 0 0 0 0 0 0 -internal test_partitions_schema_db test_row_column_page_size1 test_row_column_page_size1 NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -1 0 0 0 0 0 0 +internal test_partitions_schema_db duplicate_table duplicate_table NULL 0 0 UNPARTITIONED NULL NULL NULL NULL +internal test_partitions_schema_db listtable p1_city NULL 0 0 LIST NULL user_id, city NULL (("1", "Beijing"),("1", "Shanghai")) +internal test_partitions_schema_db listtable p2_city NULL 0 0 LIST NULL user_id, city NULL (("2", "Beijing"),("2", "Shanghai")) +internal test_partitions_schema_db listtable p3_city NULL 0 0 LIST NULL user_id, city NULL (("3", "Beijing"),("3", "Shanghai")) +internal test_partitions_schema_db randomtable randomtable NULL 0 0 UNPARTITIONED NULL NULL NULL NULL +internal test_partitions_schema_db test_range_table p0 NULL 0 0 RANGE NULL col_1 NULL [('-2147483648'), ('4')) +internal test_partitions_schema_db test_range_table p1 NULL 0 0 RANGE NULL col_1 NULL [('4'), ('6')) +internal test_partitions_schema_db test_range_table p100 NULL 0 0 RANGE NULL col_1 NULL [('83647'), ('2147483647')) +internal test_partitions_schema_db test_range_table p2 NULL 0 0 RANGE NULL col_1 NULL [('6'), ('7')) +internal test_partitions_schema_db test_range_table p3 NULL 0 0 RANGE NULL col_1 NULL [('7'), ('8')) +internal test_partitions_schema_db test_range_table p4 NULL 0 0 RANGE NULL col_1 NULL [('8'), ('10')) +internal test_partitions_schema_db test_range_table p5 NULL 0 0 RANGE NULL col_1 NULL [('10'), ('83647')) +internal test_partitions_schema_db test_row_column_page_size1 test_row_column_page_size1 NULL 0 0 UNPARTITIONED NULL NULL NULL NULL -- !select_check_3 -- diff --git a/regression-test/data/query_p0/system/test_table_properties.out b/regression-test/data/query_p0/system/test_table_properties.out index f5ca9bb9220a0a..896df77f6f05db 100644 --- a/regression-test/data/query_p0/system/test_table_properties.out +++ b/regression-test/data/query_p0/system/test_table_properties.out @@ -12,7 +12,6 @@ internal test_table_properties_db duplicate_table compaction_policy size_based internal test_table_properties_db duplicate_table compression LZ4F internal test_table_properties_db duplicate_table data_sort.col_num 3 internal test_table_properties_db duplicate_table data_sort.sort_type LEXICAL -internal test_table_properties_db duplicate_table default.replication_allocation tag.location.default: 1 internal test_table_properties_db duplicate_table disable_auto_compaction false internal test_table_properties_db duplicate_table enable_mow_light_delete false internal test_table_properties_db duplicate_table enable_single_replica_compaction false @@ -45,7 +44,6 @@ internal test_table_properties_db listtable compaction_policy size_based internal test_table_properties_db listtable compression LZ4F internal test_table_properties_db listtable data_sort.col_num 6 internal test_table_properties_db listtable data_sort.sort_type LEXICAL -internal test_table_properties_db listtable default.replication_allocation tag.location.default: 1 internal test_table_properties_db listtable disable_auto_compaction false internal test_table_properties_db listtable enable_mow_light_delete false internal test_table_properties_db listtable enable_single_replica_compaction false @@ -78,7 +76,6 @@ internal test_table_properties_db unique_table compaction_policy size_based internal test_table_properties_db unique_table 
compression LZ4F internal test_table_properties_db unique_table data_sort.col_num 2 internal test_table_properties_db unique_table data_sort.sort_type LEXICAL -internal test_table_properties_db unique_table default.replication_allocation tag.location.default: 1 internal test_table_properties_db unique_table disable_auto_compaction false internal test_table_properties_db unique_table enable_mow_light_delete false internal test_table_properties_db unique_table enable_single_replica_compaction false @@ -113,7 +110,6 @@ internal test_table_properties_db duplicate_table compaction_policy size_based internal test_table_properties_db duplicate_table compression LZ4F internal test_table_properties_db duplicate_table data_sort.col_num 3 internal test_table_properties_db duplicate_table data_sort.sort_type LEXICAL -internal test_table_properties_db duplicate_table default.replication_allocation tag.location.default: 1 internal test_table_properties_db duplicate_table disable_auto_compaction false internal test_table_properties_db duplicate_table enable_mow_light_delete false internal test_table_properties_db duplicate_table enable_single_replica_compaction false @@ -146,7 +142,6 @@ internal test_table_properties_db unique_table compaction_policy size_based internal test_table_properties_db unique_table compression LZ4F internal test_table_properties_db unique_table data_sort.col_num 2 internal test_table_properties_db unique_table data_sort.sort_type LEXICAL -internal test_table_properties_db unique_table default.replication_allocation tag.location.default: 1 internal test_table_properties_db unique_table disable_auto_compaction false internal test_table_properties_db unique_table enable_mow_light_delete false internal test_table_properties_db unique_table enable_single_replica_compaction false @@ -183,7 +178,6 @@ internal test_table_properties_db duplicate_table compaction_policy size_based internal test_table_properties_db duplicate_table compression LZ4F internal test_table_properties_db duplicate_table data_sort.col_num 3 internal test_table_properties_db duplicate_table data_sort.sort_type LEXICAL -internal test_table_properties_db duplicate_table default.replication_allocation tag.location.default: 1 internal test_table_properties_db duplicate_table disable_auto_compaction false internal test_table_properties_db duplicate_table enable_mow_light_delete false internal test_table_properties_db duplicate_table enable_single_replica_compaction false diff --git a/regression-test/data/schema_change_p0/test_agg_schema_value_modify1.out b/regression-test/data/schema_change_p0/test_agg_schema_value_modify1.out new file mode 100644 index 00000000000000..3e2dffb78ff9c2 --- /dev/null +++ b/regression-test/data/schema_change_p0/test_agg_schema_value_modify1.out @@ -0,0 +1,11 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- ! -- +123456789 Alice 2 Beijing 25 0 13812345678 No. 123 Street, Beijing 2022-01-01T10:00 {"a":100, "b":200} ["abc","def"] +234567890 Bob 2 Shanghai 30 1 13998765432 No. 456 Street, Shanghai 2022-02-02T12:00 {"a":200, "b":200} ["abc","def"] +345678901 Carol 3 Guangzhou 28 0 13724681357 No. 789 Street, Guangzhou 2022-03-03T14:00 {"a":300, "b":200} ["abc","def"] +456789012 Dave 4 Shenzhen 35 1 13680864279 No. 987 Street, Shenzhen 2022-04-04T16:00 {"a":400, "b":200} ["abc","def"] +567890123 Eve 4 Chengdu 27 0 13572468091 No. 
654 Street, Chengdu 2022-05-05T18:00 {"a":500, "b":200} ["abc","def"] +678901234 Frank 3 Hangzhou 32 1 13467985213 No. 321 Street, Hangzhou 2022-06-06T20:00 {"a":600, "b":200} ["abc","def"] +789012345 Grace 2 Xian 29 0 13333333333 No. 222 Street, Xian 2022-07-07T22:00 {"a":700, "b":200} ["abc","def"] +993456689 Alice 1 Yaan 25 0 13812345678 No. 123 Street, Beijing 2022-01-01T10:00 {"a":100, "b":200} ["abc","def"] + diff --git a/regression-test/data/show_p0/test_show_create_table_and_views.out b/regression-test/data/show_p0/test_show_create_table_and_views.out deleted file mode 100644 index c5885f8b004e9d..00000000000000 --- a/regression-test/data/show_p0/test_show_create_table_and_views.out +++ /dev/null @@ -1,46 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !show -- -show_create_table_and_views_table CREATE TABLE `show_create_table_and_views_table` (\n `user_id` largeint NOT NULL,\n `good_id` largeint NOT NULL,\n `cost` bigint SUM NULL DEFAULT "0",\n INDEX index_user_id (`user_id`) USING INVERTED COMMENT 'test index comment',\n INDEX index_good_id (`good_id`) USING INVERTED COMMENT 'test index\\" comment'\n) ENGINE=OLAP\nAGGREGATE KEY(`user_id`, `good_id`)\nPARTITION BY RANGE(`good_id`)\n(PARTITION p1 VALUES [("-170141183460469231731687303715884105728"), ("100")),\nPARTITION p2 VALUES [("100"), ("200")),\nPARTITION p3 VALUES [("200"), ("300")),\nPARTITION p4 VALUES [("300"), ("400")),\nPARTITION p5 VALUES [("400"), ("500")),\nPARTITION p6 VALUES [("500"), ("600")),\nPARTITION p7 VALUES [("600"), (MAXVALUE)))\nDISTRIBUTED BY HASH(`user_id`) BUCKETS 2\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"min_load_replica_num" = "-1",\n"is_being_synced" = "false",\n"storage_medium" = "hdd",\n"storage_format" = "V2",\n"inverted_index_storage_format" = "V2",\n"light_schema_change" = "true",\n"disable_auto_compaction" = "false",\n"binlog.enable" = "false",\n"binlog.ttl_seconds" = "86400",\n"binlog.max_bytes" = "9223372036854775807",\n"binlog.max_history_nums" = "9223372036854775807",\n"enable_single_replica_compaction" = "false",\n"group_commit_interval_ms" = "10000",\n"group_commit_data_bytes" = "134217728"\n); - --- !select -- -1 1 30 -1 2 5 -1 3 10 -1 300 2 -2 1 100 -2 2 10 -2 3 44 -2 200 1111 -3 1 10 -3 2 1 -23 900 1 -100 100 1 -200 20 1 -300 20 1 - --- !select -- -1 5 -2 10 -3 1 - --- !show -- -show_create_table_and_views_view CREATE VIEW `show_create_table_and_views_view` AS SELECT `user_id` AS `user_id`, `cost` AS `cost` FROM `show_create_table_and_views_db`.`show_create_table_and_views_table` WHERE (`good_id` = 2); utf8mb4 utf8mb4_0900_bin - --- !select -- -1 47 -2 1265 -3 11 -23 1 -100 1 -200 1 -300 1 - --- !show -- -show_create_table_and_views_table CREATE TABLE `show_create_table_and_views_table` (\n `user_id` largeint NOT NULL,\n `good_id` largeint NOT NULL,\n `cost` bigint SUM NULL DEFAULT "0",\n INDEX index_user_id (`user_id`) USING INVERTED COMMENT 'test index comment',\n INDEX index_good_id (`good_id`) USING INVERTED COMMENT 'test index\\" comment'\n) ENGINE=OLAP\nAGGREGATE KEY(`user_id`, `good_id`)\nPARTITION BY RANGE(`good_id`)\n(PARTITION p1 VALUES [("-170141183460469231731687303715884105728"), ("100")),\nPARTITION p2 VALUES [("100"), ("200")),\nPARTITION p3 VALUES [("200"), ("300")),\nPARTITION p4 VALUES [("300"), ("400")),\nPARTITION p5 VALUES [("400"), ("500")),\nPARTITION p6 VALUES [("500"), ("600")),\nPARTITION p7 VALUES [("600"), (MAXVALUE)))\nDISTRIBUTED BY HASH(`user_id`) BUCKETS 
2\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"min_load_replica_num" = "-1",\n"is_being_synced" = "false",\n"storage_medium" = "hdd",\n"storage_format" = "V2",\n"inverted_index_storage_format" = "V2",\n"light_schema_change" = "true",\n"disable_auto_compaction" = "false",\n"binlog.enable" = "false",\n"binlog.ttl_seconds" = "86400",\n"binlog.max_bytes" = "9223372036854775807",\n"binlog.max_history_nums" = "9223372036854775807",\n"enable_single_replica_compaction" = "false",\n"group_commit_interval_ms" = "10000",\n"group_commit_data_bytes" = "134217728"\n); - --- !show -- -show_create_table_and_views_like CREATE TABLE `show_create_table_and_views_like` (\n `user_id` largeint NOT NULL,\n `good_id` largeint NOT NULL,\n `cost` bigint SUM NULL DEFAULT "0",\n INDEX index_user_id (`user_id`) USING INVERTED COMMENT 'test index comment',\n INDEX index_good_id (`good_id`) USING INVERTED COMMENT 'test index\\" comment'\n) ENGINE=OLAP\nAGGREGATE KEY(`user_id`, `good_id`)\nPARTITION BY RANGE(`good_id`)\n(PARTITION p1 VALUES [("-170141183460469231731687303715884105728"), ("100")),\nPARTITION p2 VALUES [("100"), ("200")),\nPARTITION p3 VALUES [("200"), ("300")),\nPARTITION p4 VALUES [("300"), ("400")),\nPARTITION p5 VALUES [("400"), ("500")),\nPARTITION p6 VALUES [("500"), ("600")),\nPARTITION p7 VALUES [("600"), (MAXVALUE)))\nDISTRIBUTED BY HASH(`user_id`) BUCKETS 2\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"min_load_replica_num" = "-1",\n"is_being_synced" = "false",\n"storage_medium" = "hdd",\n"storage_format" = "V2",\n"inverted_index_storage_format" = "V2",\n"light_schema_change" = "true",\n"disable_auto_compaction" = "false",\n"binlog.enable" = "false",\n"binlog.ttl_seconds" = "86400",\n"binlog.max_bytes" = "9223372036854775807",\n"binlog.max_history_nums" = "9223372036854775807",\n"enable_single_replica_compaction" = "false",\n"group_commit_interval_ms" = "10000",\n"group_commit_data_bytes" = "134217728"\n); - --- !show -- -show_create_table_and_views_like_with_rollup CREATE TABLE `show_create_table_and_views_like_with_rollup` (\n `user_id` largeint NOT NULL,\n `good_id` largeint NOT NULL,\n `cost` bigint SUM NULL DEFAULT "0",\n INDEX index_user_id (`user_id`) USING INVERTED COMMENT 'test index comment',\n INDEX index_good_id (`good_id`) USING INVERTED COMMENT 'test index\\" comment'\n) ENGINE=OLAP\nAGGREGATE KEY(`user_id`, `good_id`)\nPARTITION BY RANGE(`good_id`)\n(PARTITION p1 VALUES [("-170141183460469231731687303715884105728"), ("100")),\nPARTITION p2 VALUES [("100"), ("200")),\nPARTITION p3 VALUES [("200"), ("300")),\nPARTITION p4 VALUES [("300"), ("400")),\nPARTITION p5 VALUES [("400"), ("500")),\nPARTITION p6 VALUES [("500"), ("600")),\nPARTITION p7 VALUES [("600"), (MAXVALUE)))\nDISTRIBUTED BY HASH(`user_id`) BUCKETS 2\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"min_load_replica_num" = "-1",\n"is_being_synced" = "false",\n"storage_medium" = "hdd",\n"storage_format" = "V2",\n"inverted_index_storage_format" = "V2",\n"light_schema_change" = "true",\n"disable_auto_compaction" = "false",\n"binlog.enable" = "false",\n"binlog.ttl_seconds" = "86400",\n"binlog.max_bytes" = "9223372036854775807",\n"binlog.max_history_nums" = "9223372036854775807",\n"enable_single_replica_compaction" = "false",\n"group_commit_interval_ms" = "10000",\n"group_commit_data_bytes" = "134217728"\n); - diff --git a/regression-test/data/unique_with_mow_c_p0/test_schema_change_ck.out 
b/regression-test/data/unique_with_mow_c_p0/test_schema_change_ck.out new file mode 100644 index 00000000000000..601d1c8370124d --- /dev/null +++ b/regression-test/data/unique_with_mow_c_p0/test_schema_change_ck.out @@ -0,0 +1,341 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !select_original -- +11 28 38 +10 29 39 + +-- !select_add_c4 -- +11 28 38 \N +10 29 39 \N +13 27 36 40 +12 26 37 40 + +-- !select_add_c5 -- +11 \N 28 38 \N +10 \N 29 39 \N +13 \N 27 36 40 +12 \N 26 37 40 +15 50 20 34 40 +14 50 20 35 40 + +-- !select_add_c6 -- +11 \N 28 \N 38 \N +10 \N 29 \N 39 \N +13 \N 27 \N 36 40 +12 \N 26 \N 37 40 +15 50 20 \N 34 40 +14 50 20 \N 35 40 +17 50 20 60 32 40 +16 50 20 60 33 40 + +-- !select_add_k2 -- +11 \N \N 28 \N 38 \N +10 \N \N 29 \N 39 \N +13 \N \N 27 \N 36 40 +12 \N \N 26 \N 37 40 +15 \N 50 20 \N 34 40 +14 \N 50 20 \N 35 40 +17 \N 50 20 60 32 40 +16 \N 50 20 60 33 40 +19 200 \N 20 \N 30 \N +18 200 \N 20 \N 31 \N + +-- !select_drop_c4 -- +11 \N \N 28 \N 38 +10 \N \N 29 \N 39 +13 \N \N 27 \N 36 +12 \N \N 26 \N 37 +15 \N 50 20 \N 34 +14 \N 50 20 \N 35 +17 \N 50 20 60 32 +16 \N 50 20 60 33 +19 200 \N 20 \N 30 +18 200 \N 20 \N 31 +119 200 \N 20 \N 30 +118 200 \N 20 \N 31 + +-- !select_drop_c5 -- +11 \N 28 \N 38 +10 \N 29 \N 39 +13 \N 27 \N 36 +12 \N 26 \N 37 +15 \N 20 \N 34 +14 \N 20 \N 35 +17 \N 20 60 32 +16 \N 20 60 33 +19 200 20 \N 30 +18 200 20 \N 31 +119 200 20 \N 30 +118 200 20 \N 31 +117 200 20 \N 32 +116 200 20 \N 33 + +-- !select_drop_c6 -- +11 \N 28 38 +10 \N 29 39 +13 \N 27 36 +12 \N 26 37 +15 \N 20 34 +14 \N 20 35 +17 \N 20 32 +16 \N 20 33 +19 200 20 30 +18 200 20 31 +119 200 20 30 +118 200 20 31 +117 200 20 32 +116 200 20 33 +115 200 25 34 +114 200 24 35 + +-- !select_reorder -- +11 \N 38 28 +10 \N 39 29 +13 \N 36 27 +12 \N 37 26 +15 \N 34 20 +14 \N 35 20 +17 \N 32 20 +16 \N 33 20 +19 200 30 20 +18 200 31 20 +119 200 30 20 +118 200 31 20 +117 200 32 20 +116 200 33 20 +115 200 34 25 +114 200 35 24 +113 200 36 23 +112 200 37 22 + +-- !select_modify_k2 -- +11 \N 38 28 +10 \N 39 29 +13 \N 36 27 +12 \N 37 26 +15 \N 34 20 +14 \N 35 20 +17 \N 32 20 +16 \N 33 20 +19 200 30 20 +18 200 31 20 +119 200 30 20 +118 200 31 20 +117 200 32 20 +116 200 33 20 +115 200 34 25 +114 200 35 24 +113 200 36 23 +112 200 37 22 +111 200 38 21 +110 200 39 20 + +-- !select_create_mv_base -- +11 \N 38 28 +10 \N 39 29 +13 \N 36 27 +12 \N 37 26 +15 \N 34 20 +14 \N 35 20 +17 \N 32 20 +16 \N 33 20 +19 200 30 20 +18 200 31 20 +119 200 30 20 +118 200 31 20 +117 200 32 20 +116 200 33 20 +115 200 34 25 +114 200 35 24 +113 200 36 23 +112 200 37 22 +111 200 38 21 +110 200 39 20 +211 200 38 21 +210 200 39 20 + +-- !select_create_mv_mv -- +10 39 +11 38 +12 37 +13 36 +14 35 +15 34 +16 33 +17 32 +18 31 +19 30 +118 31 +119 30 +116 33 +117 32 +114 35 +115 34 +112 37 +113 36 +110 39 +111 38 +210 39 +211 38 + +-- !select_create_rollup_base -- +11 \N 38 28 +10 \N 39 29 +13 \N 36 27 +12 \N 37 26 +15 \N 34 20 +14 \N 35 20 +17 \N 32 20 +16 \N 33 20 +19 200 30 20 +18 200 31 20 +119 200 30 20 +118 200 31 20 +117 200 32 20 +116 200 33 20 +115 200 34 25 +114 200 35 24 +113 200 36 23 +112 200 37 22 +111 200 38 21 +110 200 39 20 +211 200 38 21 +210 200 39 20 +311 200 38 21 +310 200 39 20 + +-- !select_create_rollup_roll -- +\N 10 29 +\N 11 28 +\N 12 26 +\N 13 27 +\N 14 20 +\N 15 20 +\N 16 20 +\N 17 20 +200 18 20 +200 19 20 +200 118 20 +200 119 20 +200 116 20 +200 117 20 +200 114 24 +200 115 25 +200 112 22 +200 113 23 +200 110 20 +200 111 21 +200 210 20 +200 211 21 +200 310 20 
+200 311 21 + +-- !select_add_partition -- +10011 200 38 21 +10010 200 39 20 + +-- !select_truncate -- +13 \N 36 27 +12 \N 37 26 +11 \N 38 28 +10 \N 39 29 + +-- !select_rollup_base -- +12 22 31 41 51 +11 21 32 42 52 + +-- !select_rollup_roll -- +21 11 42 32 +22 12 41 31 + +-- !select_rollup_base_sc -- +12 22 31 41 51 +11 21 32 42 52 + +-- !select_rollup_roll_sc -- +21 11 42 32 +22 12 41 31 + +-- !select_rollup_base_sc1 -- +12 22 31 41 51 +11 21 32 42 52 +14 24 33 43 53 +13 23 34 44 54 + +-- !select_rollup_roll_sc1 -- +21 11 42 32 +22 12 41 31 +23 13 44 34 +24 14 43 33 + +-- !select_restore_base2 -- +12 22 31 41 51 +11 21 32 42 52 +14 24 33 43 53 +13 23 34 44 54 +16 26 33 43 53 +15 25 34 44 54 + +-- !select_restore_roll2 -- +21 11 42 32 +22 12 41 31 +23 13 44 34 +24 14 43 33 +25 15 44 34 +26 16 43 33 + +-- !select_restore_base -- +12 22 31 41 51 +11 21 32 42 52 +14 24 33 43 53 +13 23 34 44 54 + +-- !select_restore_roll -- +21 11 42 32 +22 12 41 31 +23 13 44 34 +24 14 43 33 + +-- !select_restore_base1 -- +12 22 31 41 51 +11 21 32 42 52 +14 24 33 43 53 +13 23 34 44 54 +18 28 33 43 53 +17 27 34 44 54 + +-- !select_restore_roll1 -- +21 11 42 32 +22 12 41 31 +23 13 44 34 +24 14 43 33 +27 17 44 34 +28 18 43 33 + +-- !select_restore_base2 -- +12 22 31 41 51 +11 21 32 42 52 +14 24 33 43 53 +13 23 34 44 54 + +-- !select_restore_roll2 -- +21 11 42 32 +22 12 41 31 +23 13 44 34 +24 14 43 33 + +-- !select_restore_base3 -- +12 22 31 41 51 +11 21 32 42 52 +14 24 33 43 53 +13 23 34 44 54 +18 28 33 43 53 +17 27 34 44 54 + +-- !select_restore_roll4 -- +21 11 42 32 +22 12 41 31 +23 13 44 34 +24 14 43 33 +27 17 44 34 +28 18 43 33 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge1.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge1.csv new file mode 100644 index 00000000000000..24364822bccb0f --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge1.csv @@ -0,0 +1,6 @@ +1,10,0 +2,20,1 +3,30,0 +4,40,1 +10,999,0 +11,888,1 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge2.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge2.csv new file mode 100644 index 00000000000000..8292fab318056e --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge2.csv @@ -0,0 +1,2 @@ +1,11 +5,50 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge3.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge3.csv new file mode 100644 index 00000000000000..2aad468b2970ff --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge3.csv @@ -0,0 +1,6 @@ +1,10,2,0 +2,20,2,1 +3,30,2,0 +4,40,2,1 +10,999,2,0 +11,888,2,1 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge4.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge4.csv new file mode 100644 index 00000000000000..cec571fcb9df49 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge4.csv @@ -0,0 +1,2 @@ +1,11,4 +5,50,4 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge5.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge5.csv new file mode 100644 index 00000000000000..4170f9d8b26370 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge5.csv @@ -0,0 +1,2 @@ +6,1 +11,0 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge6.csv 
b/regression-test/data/unique_with_mow_p0/partial_update/merge6.csv new file mode 100644 index 00000000000000..a8a7c5a2414a6b --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge6.csv @@ -0,0 +1,2 @@ +7 +11 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge7.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge7.csv new file mode 100644 index 00000000000000..8e29be1395cd50 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge7.csv @@ -0,0 +1,2 @@ +6,3,1 +11,3,0 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/merge8.csv b/regression-test/data/unique_with_mow_p0/partial_update/merge8.csv new file mode 100644 index 00000000000000..20bbd5c6142536 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/merge8.csv @@ -0,0 +1,2 @@ +7,5 +11,5 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/schema_change/load_with_key_column.csv b/regression-test/data/unique_with_mow_p0/partial_update/schema_change/load_with_key_column.csv index 128ecf3838d184..75456df9f6c1ad 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/schema_change/load_with_key_column.csv +++ b/regression-test/data/unique_with_mow_p0/partial_update/schema_change/load_with_key_column.csv @@ -1 +1 @@ -1, 1 \ No newline at end of file +2, 2 \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_new_partial_update_delete.out b/regression-test/data/unique_with_mow_p0/partial_update/test_new_partial_update_delete.out index 36507eff4fdc01..f4b7ff22d7f7ad 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_new_partial_update_delete.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_new_partial_update_delete.out @@ -25,32 +25,6 @@ -- !sql15 -- 2 3 2 2 2 --- !sql21 -- -1 1 1 1 1 - --- !sql22 -- - --- !sql23 -- -1 \N \N \N \N 1 - --- !sql24 -- -1 2 \N \N \N - --- !sql31 -- -1 2 \N \N \N -2 2 2 2 2 - --- !sql32 -- - --- !sql33 -- - --- !sql34 -- -1 2 \N \N \N -2 2 2 2 2 - --- !sql35 -- -2 3 2 2 2 - -- !sql1 -- 1 1 1 1 1 @@ -77,29 +51,3 @@ -- !sql15 -- 2 3 2 2 2 --- !sql21 -- -1 1 1 1 1 - --- !sql22 -- - --- !sql23 -- -1 \N \N \N \N 1 - --- !sql24 -- -1 2 \N \N \N - --- !sql31 -- -1 2 \N \N \N -2 2 2 2 2 - --- !sql32 -- - --- !sql33 -- - --- !sql34 -- -1 2 \N \N \N -2 2 2 2 2 - --- !sql35 -- -2 3 2 2 2 - diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_after_delete.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_after_delete.out index a7b2a03456ab05..da2221b5db99a8 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_after_delete.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_after_delete.out @@ -2,12 +2,6 @@ -- !select1 -- 1 2 \N --- !select2 -- -1 2 \N - -- !select1 -- 1 2 \N --- !select2 -- -1 2 \N - diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.out index d157f501a8b4b7..19b192c95facb9 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.out @@ -35,39 +35,3 @@ doris9 3 888 888 30 4 40 40 40 --- 
!select_1 -- -doris1 -doris2 -doris3 -doris4 - --- !select_2 -- -4 - --- !select_3 -- -"doris10" -"doris11" -doris1 -doris2 -doris3 -doris4 -doris5 -doris7 -doris8 -doris9 - --- !select_4 -- -10 - --- !select_5 -- -1 10 10 10 -2 20 20 20 -3 30 30 30 -4 40 40 40 - --- !select_6 -- -1 99 99 10 -2 888 888 20 -3 888 888 30 -4 40 40 40 - diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.out index fd4ae1289281a4..d7d2504fc09c0d 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.out @@ -37,41 +37,3 @@ t1 999 2 888 t2 \N 20 30 t3 123 456 789 --- !sql -- -t1 1 \N \N - --- !sql -- -t1 1 2 \N - --- !sql -- -t1 1 2 \N -t2 \N 20 30 - --- !sql -- -t1 999 2 888 -t2 \N 20 30 - --- !sql -- -t1 999 2 888 -t2 \N 20 30 -t3 123 456 789 - --- !sql -- -t1 1 \N \N - --- !sql -- -t1 1 2 \N - --- !sql -- -t1 1 2 \N -t2 \N 20 30 - --- !sql -- -t1 999 2 888 -t2 \N 20 30 - --- !sql -- -t1 999 2 888 -t2 \N 20 30 -t3 123 456 789 - diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out index 7a639b8d6e29db..76324798d58df4 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_delete.out @@ -195,199 +195,3 @@ 5 5 5 5 5 6 6 6 6 6 --- !sql1 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 - --- !sql1 -- -2 2 2 2 2 -4 4 4 4 4 -5 5 5 5 5 - --- !sql1 -- -4 4 4 4 4 -5 5 5 5 5 - --- !with_delete_sign1 -- -1 \N \N 0 \N 1 -1 1 1 1 1 0 -2 \N \N 0 \N 1 -2 2 2 2 2 0 -3 \N \N 0 \N 1 -3 3 3 3 3 0 -4 4 4 4 4 0 -5 5 5 5 5 0 - --- !sql2 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql2 -- -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql2 -- -5 5 5 5 5 -6 6 6 6 6 - --- !sql2 -- -1 \N \N 0 \N 1 -1 1 1 1 1 0 -2 \N \N 0 \N 1 -2 2 2 2 2 0 -3 \N \N 0 \N 1 -3 3 3 3 3 0 -4 \N \N 0 \N 1 -4 4 4 4 4 0 -5 5 5 5 5 0 -6 6 6 6 6 0 -7 \N \N 0 \N 1 -8 \N \N 0 \N 1 -9 \N \N 0 \N 1 - --- !sql3 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql3 -- -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql3 -- -5 5 5 5 5 -6 6 6 6 6 - --- !sql3 -- -1 1 1 1 1 0 1 -1 1 1 1 1 1 1 -2 2 2 2 2 0 2 -2 2 2 2 2 1 2 -3 3 3 3 3 0 3 -3 3 3 3 3 1 3 -4 4 4 4 4 0 4 -4 4 4 4 4 1 4 -5 5 5 5 5 0 5 -6 6 6 6 6 0 6 -7 \N \N 0 \N 1 \N -8 \N \N 0 \N 1 \N -9 \N \N 0 \N 1 \N - --- !sql4 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql1 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 - --- !sql1 -- -2 2 2 2 2 -4 4 4 4 4 -5 5 5 5 5 - --- !sql1 -- -4 4 4 4 4 -5 5 5 5 5 - --- !with_delete_sign1 -- -1 \N \N 0 \N 1 -1 1 1 1 1 0 -2 \N \N 0 \N 1 -2 2 2 2 2 0 -3 \N \N 0 \N 1 -3 3 3 3 3 0 -4 4 4 4 4 0 -5 5 5 5 5 0 - --- !sql2 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql2 -- -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql2 -- -5 5 5 5 5 -6 6 6 6 6 - --- !sql2 -- -1 \N \N 0 \N 1 -1 1 1 1 1 0 -2 \N \N 0 \N 1 -2 2 2 2 2 0 -3 \N \N 0 \N 1 -3 3 3 3 3 0 -4 \N \N 0 \N 1 -4 4 4 4 4 0 -5 5 5 5 5 0 -6 6 6 6 6 0 -7 \N \N 0 \N 1 -8 \N \N 0 \N 1 -9 \N \N 0 \N 1 - --- !sql3 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 
5 5 -6 6 6 6 6 - --- !sql3 -- -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - --- !sql3 -- -5 5 5 5 5 -6 6 6 6 6 - --- !sql3 -- -1 1 1 1 1 0 1 -1 1 1 1 1 1 1 -2 2 2 2 2 0 2 -2 2 2 2 2 1 2 -3 3 3 3 3 0 3 -3 3 3 3 3 1 3 -4 4 4 4 4 0 4 -4 4 4 4 4 1 4 -5 5 5 5 5 0 5 -6 6 6 6 6 0 6 -7 \N \N 0 \N 1 \N -8 \N \N 0 \N 1 \N -9 \N \N 0 \N 1 \N - --- !sql4 -- -1 1 1 1 1 -2 2 2 2 2 -3 3 3 3 3 -4 4 4 4 4 -5 5 5 5 5 -6 6 6 6 6 - diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out new file mode 100644 index 00000000000000..04e4b07f2dce3f --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out @@ -0,0 +1,341 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 + +-- !sql_1_1 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 +10 \N 999 \N + +-- !sql_1_2 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !sql_2_1 -- +0 0 0 0 +3 3 30 3 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !sql_2_2 -- +0 0 0 0 +3 3 30 3 +8 8 8 8 +10 \N 999 \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 + +-- !sql_3_1 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 +10 \N 999 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +6 6 6 6 1 0 +7 7 7 7 1 0 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 + +-- !sql_3_2 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +6 6 6 6 1 0 +6 6 6 6 3 1 +7 7 7 7 1 0 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 +11 \N 888 \N 3 0 + +-- !sql_4_1 -- +0 0 0 0 +3 3 30 3 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +1 1 11 1 4 1 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +5 5 50 5 4 1 +6 6 6 6 1 0 +6 6 6 6 3 1 +7 7 7 7 1 0 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 +11 \N 888 \N 3 0 + +-- !sql_4_2 -- +0 0 0 0 +3 3 30 3 +8 8 8 8 +10 \N 999 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +1 1 11 1 4 1 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +5 5 50 5 4 1 +6 6 6 6 1 0 +6 6 6 6 3 1 +7 7 7 7 1 0 +7 7 7 7 5 1 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 +11 \N 888 \N 3 0 +11 \N 888 \N 5 1 + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 + +-- !sql_1_1 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 +10 \N 999 \N + +-- !sql_1_2 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !sql_2_1 -- +0 0 0 0 +3 3 30 3 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !sql_2_2 -- +0 0 0 0 +3 3 30 3 +8 8 8 8 +10 \N 999 \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 + +-- !sql_3_1 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 +10 \N 999 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +2 2 2 2 1 0 +2 2 20 2 2 
1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +6 6 6 6 1 0 +7 7 7 7 1 0 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 + +-- !sql_3_2 -- +0 0 0 0 +1 1 10 1 +3 3 30 3 +5 5 5 5 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +6 6 6 6 1 0 +6 6 6 6 3 1 +7 7 7 7 1 0 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 +11 \N 888 \N 3 0 + +-- !sql_4_1 -- +0 0 0 0 +3 3 30 3 +7 7 7 7 +8 8 8 8 +10 \N 999 \N +11 \N 888 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +1 1 11 1 4 1 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +5 5 50 5 4 1 +6 6 6 6 1 0 +6 6 6 6 3 1 +7 7 7 7 1 0 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 +11 \N 888 \N 3 0 + +-- !sql_4_2 -- +0 0 0 0 +3 3 30 3 +8 8 8 8 +10 \N 999 \N + +-- !inspect -- +0 0 0 0 1 0 +1 1 1 1 1 0 +1 1 10 1 2 0 +1 1 11 1 4 1 +2 2 2 2 1 0 +2 2 20 2 2 1 +3 3 3 3 1 0 +3 3 30 3 2 0 +4 4 4 4 1 0 +4 4 40 4 2 1 +5 5 5 5 1 0 +5 5 50 5 4 1 +6 6 6 6 1 0 +6 6 6 6 3 1 +7 7 7 7 1 0 +7 7 7 7 5 1 +8 8 8 8 1 0 +10 \N 999 \N 2 0 +11 \N 888 \N 2 1 +11 \N 888 \N 3 0 +11 \N 888 \N 5 1 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_mow_with_sync_mv.csv b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_mow_with_sync_mv.csv new file mode 100644 index 00000000000000..6e745e19bcad17 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_mow_with_sync_mv.csv @@ -0,0 +1 @@ +2,3,2,1,'2023-10-18','k' \ No newline at end of file diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out index 3c2ae8804c1e4f..1997e00c8b1c66 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.out @@ -141,15 +141,3 @@ 10000 2017-10-01 2017-10-01T08:00:05 北京 20 0 2017-10-01T06:00 20 10 10 10000 2017-10-01 2017-10-01T09:00:05 北京 20 0 2017-10-01T07:00 15 2 2 --- !sql -- -10000 2017-10-01 2017-10-01T08:00:05 北京 20 0 2017-10-01T06:00 20 10 10 -10000 2017-10-01 2017-10-01T09:00:05 北京 20 0 2017-10-01T07:00 15 2 2 - --- !sql -- -10000 2017-10-01 2017-10-01T08:00:05 北京 20 0 2017-10-01T06:00 20 10 10 -10000 2017-10-01 2017-10-01T09:00:05 北京 20 0 2017-10-01T07:00 15 2 2 - --- !sql -- -10000 2017-10-01 2017-10-01T08:00:05 北京 20 0 2017-10-01T06:00 20 10 10 -10000 2017-10-01 2017-10-01T09:00:05 北京 20 0 2017-10-01T07:00 15 2 2 - diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_only_keys.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_only_keys.out new file mode 100644 index 00000000000000..9ec27274af42fd --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_only_keys.out @@ -0,0 +1,59 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +4 \N \N \N +5 \N \N \N +6 \N \N \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +4 \N \N \N +5 \N \N \N +6 \N \N \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +4 \N \N \N +5 \N \N \N +6 \N \N \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +4 \N \N \N +5 \N \N \N +6 \N \N \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +4 \N \N \N +5 \N \N \N +6 \N \N \N + +-- !sql -- +0 0 0 0 +1 1 1 1 +2 2 2 2 +4 \N \N \N +5 \N \N \N +6 \N \N \N + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_map_col.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_map_col.out new file mode 100644 index 00000000000000..4fab559a4337b4 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_map_col.out @@ -0,0 +1,95 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql1 -- +1 1 \N +2 2 \N +3 3 \N +4 4 \N + +-- !sql1 -- +1 1 20 +2 2 20 +3 3 \N +4 4 \N + +-- !sql1 -- +1 1 20 +2 2 20 +3 3 \N +4 4 \N + +-- !sql1 -- +3 3 2099-09-10T12:00:00.977174 \N 2099-09-10T12:00:00.977174 +4 4 2099-09-10T12:00:00.977174 \N 2099-09-10T12:00:00.977174 + +-- !sql2 -- +1 1 +2 2 +3 3 +4 4 + +-- !sql3 -- +1 1 999 999 +2 2 999 999 +3 3 999 999 +4 4 999 999 + +-- !sql3 -- +1 99 8888 8888 +2 99 8888 8888 +3 3 999 999 +4 4 999 999 +5 99 8888 8888 + +-- !sql4 -- +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N + +-- !sql1 -- +1 1 \N +2 2 \N +3 3 \N +4 4 \N + +-- !sql1 -- +1 1 20 +2 2 20 +3 3 \N +4 4 \N + +-- !sql1 -- +1 1 20 +2 2 20 +3 3 \N +4 4 \N + +-- !sql1 -- +3 3 2099-09-10T12:00:00.977174 \N 2099-09-10T12:00:00.977174 +4 4 2099-09-10T12:00:00.977174 \N 2099-09-10T12:00:00.977174 + +-- !sql2 -- +1 1 +2 2 +3 3 +4 4 + +-- !sql3 -- +1 1 999 999 +2 2 999 999 +3 3 999 999 +4 4 999 999 + +-- !sql3 -- +1 99 8888 8888 +2 99 8888 8888 +3 3 999 999 +4 4 999 999 +5 99 8888 8888 + +-- !sql4 -- +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N + diff --git a/regression-test/data/update/test_unique_table_update.out b/regression-test/data/update/test_unique_table_update.out index d0c0dda6ad2671..988141c2b4c265 100644 --- a/regression-test/data/update/test_unique_table_update.out +++ b/regression-test/data/update/test_unique_table_update.out @@ -14,13 +14,3 @@ 2 1 1 2 2 3 3 3 3 3 --- !select_4 -- -1 1 1 1 1 -2 1 1 2 2 -3 3 3 3 3 - --- !select_5 -- -1 1 1 1 1 -2 1 1 2 2 -3 3 3 3 3 - diff --git a/regression-test/data/update/test_update_mow.out b/regression-test/data/update/test_update_mow.out index 625e827526fcdd..c79767d0657f46 100644 --- a/regression-test/data/update/test_update_mow.out +++ b/regression-test/data/update/test_update_mow.out @@ -23,33 +23,3 @@ date_value date Yes false \N NONE 2 20 2 2000.0 2000-01-02 3 3 3 3.0 2000-01-03 --- !sql -- -a 1 2023-11-12T00:00 test1 1 -b 2 2023-11-12T00:00 test2 2 -c 3 2023-11-12T00:00 test3 3 - --- !sql -- -a 1 2023-11-12T00:00 test1 999 -b 2 2023-11-12T00:00 test2 2 -c 3 2023-11-12T00:00 test3 3 - --- !sql -- -a 1 2023-11-12T00:00 test1 999 -b 2 2023-11-12T00:00 test2 2 -c 3 2022-01-01T00:00 update value 3 - --- !sql -- -a 1 2023-11-12T00:00 test1 1 -b 2 2023-11-12T00:00 test2 2 -c 3 2023-11-12T00:00 test3 3 - --- !sql -- -a 1 2023-11-12T00:00 test1 999 -b 2 2023-11-12T00:00 test2 2 -c 3 2023-11-12T00:00 test3 3 - --- !sql -- -a 1 2023-11-12T00:00 test1 999 -b 2 2023-11-12T00:00 test2 2 -c 3 
2022-01-01T00:00 update value 3 - diff --git a/regression-test/data/view_p0/create_view_star_except_and_cast_to_sql.out b/regression-test/data/view_p0/create_view_star_except_and_cast_to_sql.out index ede8faef76f8d7..3b8ebb65805049 100644 --- a/regression-test/data/view_p0/create_view_star_except_and_cast_to_sql.out +++ b/regression-test/data/view_p0/create_view_star_except_and_cast_to_sql.out @@ -7,8 +7,8 @@ 6 6 -- !test_select_star_except_sql -- -v_mal_old_create_view CREATE VIEW `v_mal_old_create_view` AS SELECT `regression_test_view_p0`.`mal_old_create_view`.`pk` AS `pk`, `regression_test_view_p0`.`mal_old_create_view`.`b` AS `b` FROM `regression_test_view_p0`.`mal_old_create_view`; utf8mb4 utf8mb4_0900_bin +v_mal_old_create_view CREATE VIEW `v_mal_old_create_view` AS select `internal`.`regression_test_view_p0`.`mal_old_create_view`.`pk`, `internal`.`regression_test_view_p0`.`mal_old_create_view`.`b` from `internal`.`regression_test_view_p0`.`mal_old_create_view`; utf8mb4 utf8mb4_0900_bin -- !test_sql -- -v_mal_old_create_view2 CREATE VIEW `v_mal_old_create_view2` AS SELECT CAST(CAST(`a` AS text) AS time(0)) AS `__cast_expr_0` FROM `regression_test_view_p0`.`mal_old_create_view`; utf8mb4 utf8mb4_0900_bin +v_mal_old_create_view2 CREATE VIEW `v_mal_old_create_view2` AS select cast(cast(`internal`.`regression_test_view_p0`.`mal_old_create_view`.`a` as string) as time) from `internal`.`regression_test_view_p0`.`mal_old_create_view`; utf8mb4 utf8mb4_0900_bin diff --git a/regression-test/data/view_p0/view_p0.out b/regression-test/data/view_p0/view_p0.out index 976d4a3cb2ad07..21d23110a91969 100644 --- a/regression-test/data/view_p0/view_p0.out +++ b/regression-test/data/view_p0/view_p0.out @@ -18,3 +18,9 @@ -- !sql2 -- +-- !select_aes -- +17777208882 + +-- !show_aes -- +test_view_aes CREATE VIEW `test_view_aes` AS SELECT aes_decrypt(from_base64("EXp7k7M9Zv1mIwPpno28Hg=="), '17IMZrGdwWf2Piy8', 'II2HLtihr5TQpQgR', 'AES_128_CBC'); utf8mb4 utf8mb4_0900_bin + diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy index 0042aa69a0aded..028bcc71877962 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy @@ -121,6 +121,7 @@ class Config { public Integer actionParallel public Integer times public boolean withOutLoadData + public boolean runNonConcurrent public String caseNamePrefix public boolean isSmokeTest public String multiClusterBes @@ -474,6 +475,7 @@ class Config { config.randomOrder = cmd.hasOption(randomOrderOpt) config.stopWhenFail = cmd.hasOption(stopWhenFailOpt) config.withOutLoadData = cmd.hasOption(withOutLoadDataOpt) + config.runNonConcurrent = Boolean.parseBoolean(cmd.getOptionValue(runNonConcurrentOpt, "True")) config.caseNamePrefix = cmd.getOptionValue(caseNamePrefixOpt, config.caseNamePrefix) config.dryRun = cmd.hasOption(dryRunOpt) config.isSmokeTest = cmd.hasOption(isSmokeTestOpt) @@ -481,6 +483,7 @@ class Config { log.info("randomOrder is ${config.randomOrder}".toString()) log.info("stopWhenFail is ${config.stopWhenFail}".toString()) log.info("withOutLoadData is ${config.withOutLoadData}".toString()) + log.info("runNonConcurrent is ${config.runNonConcurrent}".toString()) log.info("caseNamePrefix is ${config.caseNamePrefix}".toString()) log.info("dryRun is ${config.dryRun}".toString()) def s3SourceList = ["aliyun", 
"aliyun-internal", "tencent", "huawei", "azure", "gcp"] diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy index 67322287d07aa5..5b220949168747 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy @@ -73,6 +73,7 @@ class ConfigOptions { static Option stopWhenFailOpt static Option timesOpt static Option withOutLoadDataOpt + static Option runNonConcurrentOpt static Option caseNamePrefixOpt static Option dryRunOpt static Option isSmokeTestOpt @@ -467,6 +468,11 @@ class ConfigOptions { .longOpt("withOutLoadData") .desc("do not run load.groovy to reload data to Doris.") .build() + runNonConcurrentOpt = Option.builder("runNonConcurrent") + .required(false) + .hasArg(true) + .desc("whether run non-concurrent tests") + .build() caseNamePrefixOpt = Option.builder("cnp") .required(false) .hasArg(true) @@ -622,6 +628,7 @@ class ConfigOptions { .addOption(stopWhenFailOpt) .addOption(timesOpt) .addOption(withOutLoadDataOpt) + .addOption(runNonConcurrentOpt) .addOption(caseNamePrefixOpt) .addOption(dryRunOpt) .addOption(isSmokeTestOpt) diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/RegressionTest.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/RegressionTest.groovy index a0cc8ba2ea12c4..65def1c50f1109 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/RegressionTest.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/RegressionTest.groovy @@ -274,6 +274,10 @@ class RegressionTest { } } + if (!config.runNonConcurrent) { + return + } + log.info('Start to run single scripts') futures.clear() scriptSources.eachWithIndex { source, i -> diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy index 5ad40f9df51260..65c65f85c032bb 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy @@ -66,6 +66,7 @@ import java.util.concurrent.ExecutorService import java.util.concurrent.Executors import java.util.concurrent.Future import java.util.concurrent.ThreadFactory +import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicBoolean import java.util.stream.Collectors import java.util.stream.LongStream @@ -833,6 +834,11 @@ class Suite implements GroovyInterceptable { return s3Url } + String getJdbcPassword() { + String sk = context.config.otherConfigs.get("jdbcPassword"); + return sk + } + static void scpFiles(String username, String host, String files, String filePath, boolean fromDst=true) { String cmd = "scp -o StrictHostKeyChecking=no -r ${username}@${host}:${files} ${filePath}" if (!fromDst) { @@ -937,8 +943,13 @@ class Suite implements GroovyInterceptable { return sql_return_maparray("show frontends").collect { it.Host + ":" + it.HttpPort }; } + List getFrontendIpEditlogPort() { + return sql_return_maparray("show frontends").collect { it.Host + ":" + it.EditLogPort }; + } + void getBackendIpHttpPort(Map backendId_to_backendIP, Map backendId_to_backendHttpPort) { List> backends = sql("show backends"); + 
logger.info("Content of backends: ${backends}") for (List backend : backends) { backendId_to_backendIP.put(String.valueOf(backend[0]), String.valueOf(backend[1])); backendId_to_backendHttpPort.put(String.valueOf(backend[0]), String.valueOf(backend[4])); @@ -946,6 +957,18 @@ class Suite implements GroovyInterceptable { return; } + void getBackendIpHeartbeatPort(Map backendId_to_backendIP, + Map backendId_to_backendHeartbeatPort) { + List> backends = sql("show backends"); + logger.info("Content of backends: ${backends}") + for (List backend : backends) { + backendId_to_backendIP.put(String.valueOf(backend[0]), String.valueOf(backend[1])); + backendId_to_backendHeartbeatPort.put(String.valueOf(backend[0]), String.valueOf(backend[2])); + } + return; + } + + void getBackendIpHttpAndBrpcPort(Map backendId_to_backendIP, Map backendId_to_backendHttpPort, Map backendId_to_backendBrpcPort) { @@ -1276,7 +1299,29 @@ class Suite implements GroovyInterceptable { } logger.info("The state of ${showTasks} is ${status}") Thread.sleep(1000); - } while (timeoutTimestamp > System.currentTimeMillis() && (status == 'PENDING' || status == 'RUNNING' || status == 'NULL')) + } while (timeoutTimestamp > System.currentTimeMillis() && (status == 'PENDING' || status == 'RUNNING' || status == 'NULL')) + if (status != "SUCCESS") { + logger.info("status is not success") + } + Assert.assertEquals("SUCCESS", status) + } + + void waitingMTMVTaskFinishedByMvNameAllowCancel(String mvName) { + Thread.sleep(2000); + String showTasks = "select TaskId,JobId,JobName,MvId,Status,MvName,MvDatabaseName,ErrorMsg from tasks('type'='mv') where MvName = '${mvName}' order by CreateTime ASC" + String status = "NULL" + List> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(4) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status == 'PENDING' || status == 'RUNNING' || status == 'NULL' || status == 'CANCELED')) if (status != "SUCCESS") { logger.info("status is not success") } @@ -1352,29 +1397,26 @@ class Suite implements GroovyInterceptable { } } - def getMVJobState = { tableName, limit -> - def jobStateResult = sql """ SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC limit ${limit}""" - if (jobStateResult.size() != limit) { + def getMVJobState = { tableName, rollUpName -> + def jobStateResult = sql """ SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' and IndexName = '${rollUpName}' ORDER BY CreateTime DESC limit 1""" + if (jobStateResult == null || jobStateResult.isEmpty()) { logger.info("show alter table roll is empty" + jobStateResult) return "NOT_READY" } - for (int i = 0; i < jobStateResult.size(); i++) { - logger.info("getMVJobState is " + jobStateResult[i][8]) - if (!jobStateResult[i][8].equals("FINISHED")) { - return "NOT_READY" - } + logger.info("getMVJobState jobStateResult is " + jobStateResult.toString()) + if (!jobStateResult[0][8].equals("FINISHED")) { + return "NOT_READY" } return "FINISHED"; } - def waitForRollUpJob = (tbName, timeoutMillisecond, limit) -> { + def waitForRollUpJob = (tbName, rollUpName, timeoutMillisecond) -> { long startTime = System.currentTimeMillis() long timeoutTimestamp = startTime + timeoutMillisecond String result - // time out or has run exceed 10 minute, then break - while 
(timeoutTimestamp > System.currentTimeMillis() && System.currentTimeMillis() - startTime < 600000){ - result = getMVJobState(tbName, limit) + while (timeoutTimestamp > System.currentTimeMillis()){ + result = getMVJobState(tbName, rollUpName) if (result == "FINISHED") { sleep(200) return @@ -1470,6 +1512,71 @@ class Suite implements GroovyInterceptable { } } + void waitAddFeFinished(String host, int port) { + logger.info("waiting for ${host}:${port}") + Awaitility.await().atMost(60, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).and() + .pollInterval(100, TimeUnit.MILLISECONDS).await().until(() -> { + def frontends = getFrontendIpEditlogPort() + logger.info("frontends ${frontends}") + boolean matched = false + String expcetedFE = "${host}:${port}" + for (frontend: frontends) { + logger.info("checking fe ${frontend}, expectedFe ${expcetedFE}") + if (frontend.equals(expcetedFE)) { + matched = true; + } + } + return matched; + }); + } + + void waitDropFeFinished(String host, int port) { + Awaitility.await().atMost(60, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).and() + .pollInterval(100, TimeUnit.MILLISECONDS).await().until(() -> { + def frontends = getFrontendIpEditlogPort() + boolean matched = false + for (frontend: frontends) { + if (frontend == "$host:$port") { + matched = true + } + } + return !matched; + }); + } + + void waitAddBeFinished(String host, int port) { + logger.info("waiting ${host}:${port} added"); + Awaitility.await().atMost(60, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).and() + .pollInterval(100, TimeUnit.MILLISECONDS).await().until(() -> { + def ipList = [:] + def portList = [:] + getBackendIpHeartbeatPort(ipList, portList) + boolean matched = false + ipList.each { beid, ip -> + if (ip.equals(host) && ((portList[beid] as int) == port)) { + matched = true; + } + } + return matched; + }); + } + + void waitDropBeFinished(String host, int port) { + Awaitility.await().atMost(60, TimeUnit.SECONDS).with().pollDelay(100, TimeUnit.MILLISECONDS).and() + .pollInterval(100, TimeUnit.MILLISECONDS).await().until(() -> { + def ipList = [:] + def portList = [:] + getBackendIpHeartbeatPort(ipList, portList) + boolean matched = false + ipList.each { beid, ip -> + if (ip == host && portList[beid] as int == port) { + matched = true; + } + } + return !matched; + }); + } + void waiteCreateTableFinished(String tableName) { Thread.sleep(2000); String showCreateTable = "SHOW CREATE TABLE ${tableName}" @@ -1556,7 +1663,9 @@ class Suite implements GroovyInterceptable { def mv_rewrite_success_without_check_chosen = { query_sql, mv_name -> explain { sql(" memo plan ${query_sql}") - contains("${mv_name} not chose") + check { result -> + result.contains("${mv_name} chose") || result.contains("${mv_name} not chose") + } } } @@ -1614,7 +1723,9 @@ class Suite implements GroovyInterceptable { explain { sql(" memo plan ${query_sql}") - notContains("${mv_name} fail") + check { result -> + result.contains("${mv_name} chose") || result.contains("${mv_name} not chose") + } } } @@ -1637,8 +1748,7 @@ class Suite implements GroovyInterceptable { explain { sql(" memo plan ${query_sql}") - notContains("${mv_name} chose") - notContains("${mv_name} not chose") + contains("${mv_name} fail") } } @@ -1769,7 +1879,7 @@ class Suite implements GroovyInterceptable { drop_cluster_api.call(js) { respCode, body -> - log.info("dorp cluster resp: ${body} ${respCode}".toString()) + log.info("drop cluster resp: ${body} ${respCode}".toString()) def json = parseJson(body) 
assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED")) } diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy index 8e4c46a130ae9a..862f437840eccd 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy @@ -39,7 +39,7 @@ class ClusterOptions { Boolean sqlModeNodeMgr = false Boolean beMetaServiceEndpoint = true - Boolean beCloudInstanceId = false + Boolean beClusterId = false int waitTimeout = 180 @@ -322,8 +322,8 @@ class SuiteCluster { if (!options.beMetaServiceEndpoint) { cmd += ['--no-be-metaservice-endpoint'] } - if (!options.beCloudInstanceId) { - cmd += ['--no-be-cloud-instanceid'] + if (!options.beClusterId) { + cmd += ['--no-be-cluster-id'] } cmd += ['--wait-timeout', String.valueOf(options.waitTimeout)] diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteContext.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteContext.groovy index d9268643a444bb..fe875af8a47dc0 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteContext.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteContext.groovy @@ -296,7 +296,7 @@ class SuiteContext implements Closeable { public T connect(String user, String password, String url, Closure actionSupplier) { def originConnection = threadLocalConn.get() try { - log.info("Create new connection for user '${user}'") + log.info("Create new connection for user '${user}' to '${url}'") return DriverManager.getConnection(url, user, password).withCloseable { newConn -> def newConnInfo = new ConnectionInfo() newConnInfo.conn = newConn @@ -306,7 +306,7 @@ class SuiteContext implements Closeable { return actionSupplier.call() } } finally { - log.info("Recover original connection") + log.info("Recover original connection to '${url}'") if (originConnection == null) { threadLocalConn.remove() } else { diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/util/DebugPoint.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/util/DebugPoint.groovy index 8c764eb453d017..7386d896ac3555 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/util/DebugPoint.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/util/DebugPoint.groovy @@ -103,6 +103,10 @@ class DebugPoint { def enableDebugPointForAllBEs(String name, Map params = null) { operateDebugPointForAllBEs({ host, port -> logger.info("enable debug point ${name} with params ${params} for BE $host:$port") + if (port == -1) { + logger.info("skip for BE $host:$port") + return + } enableDebugPoint(host, port, NodeType.BE, name, params) }) } @@ -111,6 +115,10 @@ class DebugPoint { def disableDebugPointForAllBEs(String name) { operateDebugPointForAllBEs { host, port -> logger.info("disable debug point ${name} for BE $host:$port") + if (port == -1) { + logger.info("skip for BE $host:$port") + return + } disableDebugPoint(host, port, NodeType.BE, name) } } @@ -119,6 +127,10 @@ class DebugPoint { def clearDebugPointsForAllBEs() { operateDebugPointForAllBEs { host, port -> logger.info("clear debug point for BE 
$host:$port") + if (port == -1) { + logger.info("skip for BE $host:$port") + return + } clearDebugPoints(host, port, NodeType.BE) } } @@ -137,6 +149,10 @@ class DebugPoint { def enableDebugPointForAllFEs(String name, Map params = null) { operateDebugPointForAllFEs({ host, port -> logger.info("enable debug point ${name} with params ${params} for FE $host:$port") + if (port == -1) { + logger.info("skip for FE $host:$port") + return + } enableDebugPoint(host, port, NodeType.FE, name, params) }) } @@ -144,6 +160,10 @@ class DebugPoint { def disableDebugPointForAllFEs(String name) { operateDebugPointForAllFEs { host, port -> logger.info("disable debug point ${name} for FE $host:$port") + if (port == -1) { + logger.info("skip for FE $host:$port") + return + } disableDebugPoint(host, port, NodeType.FE, name) } } @@ -151,6 +171,10 @@ class DebugPoint { def clearDebugPointsForAllFEs() { operateDebugPointForAllFEs { host, port -> logger.info("clear debug point for FE $host:$port") + if (port == -1) { + logger.info("skip for FE $host:$port") + return + } clearDebugPoints(host, port, NodeType.FE) } } diff --git a/regression-test/java-udf-src/pom.xml b/regression-test/java-udf-src/pom.xml index 1967e4f0e1fb44..388bf795e1ec4b 100644 --- a/regression-test/java-udf-src/pom.xml +++ b/regression-test/java-udf-src/pom.xml @@ -45,11 +45,13 @@ under the License. hive-exec core ${hive.version} + provided org.apache.hive hive-serde ${hive.version} + provided diff --git a/regression-test/pipeline/cloud_p0/conf/be_custom.conf b/regression-test/pipeline/cloud_p0/conf/be_custom.conf index c8f41b100a7b38..377a02536c6d29 100644 --- a/regression-test/pipeline/cloud_p0/conf/be_custom.conf +++ b/regression-test/pipeline/cloud_p0/conf/be_custom.conf @@ -35,4 +35,5 @@ stream_load_record_batch_size = 500 webserver_num_workers = 128 enable_new_tablet_do_compaction = true arrow_flight_sql_port = 8181 -pipeline_task_leakage_detect_period_sec=1 \ No newline at end of file +pipeline_task_leakage_detect_period_sec=1 +crash_in_memory_tracker_inaccurate = true diff --git a/regression-test/pipeline/cloud_p0/conf/regression-conf-custom.groovy b/regression-test/pipeline/cloud_p0/conf/regression-conf-custom.groovy index 8d6b265a54bf02..30649c4e0ee193 100644 --- a/regression-test/pipeline/cloud_p0/conf/regression-conf-custom.groovy +++ b/regression-test/pipeline/cloud_p0/conf/regression-conf-custom.groovy @@ -25,13 +25,9 @@ excludeSuites = "000_the_start_sentinel_do_not_touch," + // keep this line as th "mv_contain_external_table," + // run on external pipeline "set_replica_status," + // not a case for cloud mode, no need to run "test_be_inject_publish_txn_fail," + // not a case for cloud mode, no need to run - "test_compaction_uniq_cluster_keys_with_delete," + - "test_compaction_uniq_keys_cluster_key," + "test_dump_image," + "test_index_failure_injection," + "test_information_schema_external," + - "test_pk_uk_case_cluster," + - "test_point_query_cluster_key," + "test_profile," + "test_publish_timeout," + "test_refresh_mtmv," + // not supported yet @@ -55,7 +51,6 @@ excludeDirectories = "000_the_start_sentinel_do_not_touch," + // keep this line "cloud_p0/cache," + "workload_manager_p1," + "nereids_rules_p0/subquery," + - "unique_with_mow_c_p0," + "backup_restore," + // not a case for cloud mode, no need to run "cold_heat_separation," + "storage_medium_p0," + diff --git a/regression-test/pipeline/cloud_p0/run.sh b/regression-test/pipeline/cloud_p0/run.sh index d565e16d989870..a78377c95ee017 100644 --- 
a/regression-test/pipeline/cloud_p0/run.sh +++ b/regression-test/pipeline/cloud_p0/run.sh @@ -71,7 +71,8 @@ run() { --times "${repeat_times_from_trigger:-1}" \ -parallel 18 \ -suiteParallel 18 \ - -actionParallel 10; then + -actionParallel 10 \ + -runNonConcurrent false; then echo else bash "${teamcity_build_checkoutDir}"/regression-test/pipeline/common/get-or-set-tmp-env.sh 'set' "export need_collect_log=true" diff --git a/regression-test/pipeline/cloud_p1/conf/be_custom.conf b/regression-test/pipeline/cloud_p1/conf/be_custom.conf index b0649875178c48..0dc78140ed9d44 100644 --- a/regression-test/pipeline/cloud_p1/conf/be_custom.conf +++ b/regression-test/pipeline/cloud_p1/conf/be_custom.conf @@ -30,4 +30,5 @@ file_cache_path = [{"path":"/data/doris_cloud/file_cache","total_size":104857600 tmp_file_dirs = [{"path":"/data/doris_cloud/tmp","max_cache_bytes":104857600,"max_upload_bytes":104857600}] save_load_error_log_to_s3 = true arrow_flight_sql_port = 8181 -pipeline_task_leakage_detect_period_sec=1 \ No newline at end of file +pipeline_task_leakage_detect_period_sec=1 +crash_in_memory_tracker_inaccurate = true diff --git a/regression-test/pipeline/common/github-utils.sh b/regression-test/pipeline/common/github-utils.sh index 4f44870bb21b0d..242b77b832d6c4 100644 --- a/regression-test/pipeline/common/github-utils.sh +++ b/regression-test/pipeline/common/github-utils.sh @@ -308,6 +308,7 @@ file_changed_cloud_p0() { [[ "${af}" == 'build.sh' ]] || [[ "${af}" == 'env.sh' ]] || [[ "${af}" == 'run-regression-test.sh' ]] || + [[ "${af}" == 'cloud/CMakeLists.txt' ]] || [[ "${af}" == 'cloud/src/'* ]] || [[ "${af}" == 'cloud/cmake/'* ]] || [[ "${af}" == 'cloud/test/'* ]]; then diff --git a/regression-test/pipeline/external/conf/be.conf b/regression-test/pipeline/external/conf/be.conf index a7edbd7b55a59e..19ebc9ee812c21 100644 --- a/regression-test/pipeline/external/conf/be.conf +++ b/regression-test/pipeline/external/conf/be.conf @@ -66,4 +66,5 @@ enable_jvm_monitor = true KRB5_CONFIG=/keytabs/krb5.conf kerberos_krb5_conf_path=/keytabs/krb5.conf -pipeline_task_leakage_detect_period_sec=1 \ No newline at end of file +pipeline_task_leakage_detect_period_sec=1 +crash_in_memory_tracker_inaccurate = true diff --git a/regression-test/pipeline/external/conf/fe.conf b/regression-test/pipeline/external/conf/fe.conf index f52baed99c267c..92a6184fd92c75 100644 --- a/regression-test/pipeline/external/conf/fe.conf +++ b/regression-test/pipeline/external/conf/fe.conf @@ -98,7 +98,7 @@ auth_token = 5ff161c3-2c08-4079-b108-26c8850b6598 infodb_support_ext_catalog=true trino_connector_plugin_dir=/tmp/trino_connector/connectors -hms_events_polling_interval_ms=2000 +hms_events_polling_interval_ms=700 KRB5_CONFIG=/keytabs/krb5.conf diff --git a/regression-test/pipeline/p0/conf/be.conf b/regression-test/pipeline/p0/conf/be.conf index e4745ccb5a332f..c5c8104ecf1279 100644 --- a/regression-test/pipeline/p0/conf/be.conf +++ b/regression-test/pipeline/p0/conf/be.conf @@ -69,4 +69,5 @@ enable_jvm_monitor = true enable_be_proc_monitor = true be_proc_monitor_interval_ms = 30000 webserver_num_workers = 128 -pipeline_task_leakage_detect_period_sec=1 \ No newline at end of file +pipeline_task_leakage_detect_period_sec=1 +crash_in_memory_tracker_inaccurate = true diff --git a/regression-test/pipeline/p1/conf/be.conf b/regression-test/pipeline/p1/conf/be.conf index 1c0fd53d4958ca..01510e6422b975 100644 --- a/regression-test/pipeline/p1/conf/be.conf +++ b/regression-test/pipeline/p1/conf/be.conf @@ -62,3 +62,4 @@ 
enable_missing_rows_correctness_check=true enable_jvm_monitor = true pipeline_task_leakage_detect_period_sec=1 +crash_in_memory_tracker_inaccurate = true diff --git a/regression-test/suites/account_p0/test_nereids_row_policy.groovy b/regression-test/suites/account_p0/test_nereids_row_policy.groovy index 6ae858997b11a0..ea7cf99fb28840 100644 --- a/regression-test/suites/account_p0/test_nereids_row_policy.groovy +++ b/regression-test/suites/account_p0/test_nereids_row_policy.groovy @@ -21,11 +21,8 @@ suite("test_nereids_row_policy") { def user='row_policy_user' def tokens = context.config.jdbcUrl.split('/') def url=tokens[0] + "//" + tokens[2] + "/" + dbName + "?" - def isCloudMode = { - def ret = sql_return_maparray """show backends""" - ret.Tag[0].contains("cloud_cluster_name") - } - def cloudMode = isCloudMode.call() + + def cloudMode = isCloudMode() //cloud-mode if (cloudMode) { def clusters = sql " SHOW CLUSTERS; " @@ -35,25 +32,16 @@ suite("test_nereids_row_policy") { } def assertQueryResult = { size -> - def result1 = connect(user=user, password='123abc!@#', url=url) { - sql "set enable_nereids_planner = false" - sql "SELECT * FROM ${tableName}" - } - def result2 = connect(user=user, password='123abc!@#', url=url) { - sql "set enable_nereids_planner = true" - sql "set enable_fallback_to_original_planner = false" + def result = connect(user=user, password='123abc!@#', url=url) { sql "SELECT * FROM ${tableName}" } connect(user=user, password='123abc!@#', url=url) { - sql "set enable_nereids_planner = true" - sql "set enable_fallback_to_original_planner = false" test { sql "SELECT * FROM ${viewName}" exception "does not have privilege for" } } - assertEquals(size, result1.size()) - assertEquals(size, result2.size()) + assertEquals(size, result.size()) } def createPolicy = { name, predicate, type -> diff --git a/regression-test/suites/audit/test_audit_log_behavior.groovy b/regression-test/suites/audit/test_audit_log_behavior.groovy index 1c30a38ac69c0e..2829474560e1cf 100644 --- a/regression-test/suites/audit/test_audit_log_behavior.groovy +++ b/regression-test/suites/audit/test_audit_log_behavior.groovy @@ -83,11 +83,12 @@ suite("test_audit_log_behavior") { // check result for (int i = 0; i < cnt; i++) { def tuple2 = sqls.get(i) - def retry = 90 + def retry = 180 def res = sql "select stmt from __internal_schema.audit_log where stmt like '%3F6B9A_${i}%' order by time asc limit 1" while (res.isEmpty()) { if (retry-- < 0) { - throw new RuntimeException("It has retried a few but still failed, you need to check it") + logger.warn("It has retried a few but still failed, you need to check it") + return } sleep(1000) res = sql "select stmt from __internal_schema.audit_log where stmt like '%3F6B9A_${i}%' order by time asc limit 1" diff --git a/regression-test/suites/auth_p0/test_http_meta_databases_auth.groovy b/regression-test/suites/auth_p0/test_http_meta_databases_auth.groovy new file mode 100644 index 00000000000000..c515b5c83ea722 --- /dev/null +++ b/regression-test/suites/auth_p0/test_http_meta_databases_auth.groovy @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_http_meta_databases_auth","p0,auth,nonConcurrent") { + String suiteName = "test_http_meta_databases_auth" + String dbName = context.config.getDbNameByFile(context.file) + String tableName = "${suiteName}_table" + String user = "${suiteName}_user" + String pwd = 'C123_567p' + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + try { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "true"); """ + def getDatabases = { check_func -> + httpTest { + basicAuthorization "${user}","${pwd}" + endpoint "${context.config.feHttpAddress}" + uri "/rest/v2/api/meta/namespaces/default_cluster/databases" + op "get" + check check_func + } + } + + getDatabases.call() { + respCode, body -> + log.info("body:${body}") + assertFalse("${body}".contains("${dbName}")) + } + + sql """grant select_priv on ${dbName} to ${user}""" + + getDatabases.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("${dbName}")) + } + + try_sql("DROP USER ${user}") + } finally { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "false"); """ + } +} diff --git a/regression-test/suites/auth_p0/test_http_meta_tables_auth.groovy b/regression-test/suites/auth_p0/test_http_meta_tables_auth.groovy new file mode 100644 index 00000000000000..b2fd5914352808 --- /dev/null +++ b/regression-test/suites/auth_p0/test_http_meta_tables_auth.groovy @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_http_meta_tables_auth","p0,auth,nonConcurrent") { + String suiteName = "test_http_meta_tables_auth" + String dbName = context.config.getDbNameByFile(context.file) + String tableName = "${suiteName}_table" + String user = "${suiteName}_user" + String pwd = 'C123_567p' + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """drop table if exists `${tableName}`""" + sql """ + CREATE TABLE `${tableName}` ( + `k1` int, + `k2` int + ) ENGINE=OLAP + DISTRIBUTED BY random BUCKETS auto + PROPERTIES ('replication_num' = '1') ; + """ + try { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "true"); """ + def getTables = { check_func -> + httpTest { + basicAuthorization "${user}","${pwd}" + endpoint "${context.config.feHttpAddress}" + uri "/rest/v2/api/meta/namespaces/default_cluster/databases/${dbName}/tables" + op "get" + check check_func + } + } + + getTables.call() { + respCode, body -> + log.info("body:${body}") + assertFalse("${body}".contains("${tableName}")) + } + + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + + getTables.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("${tableName}")) + } + + sql """drop table if exists `${tableName}`""" + try_sql("DROP USER ${user}") + } finally { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "false"); """ + } + + +} diff --git a/regression-test/suites/auth_p0/test_http_meta_tables_schema_auth.groovy b/regression-test/suites/auth_p0/test_http_meta_tables_schema_auth.groovy new file mode 100644 index 00000000000000..f03d5a55bd32fb --- /dev/null +++ b/regression-test/suites/auth_p0/test_http_meta_tables_schema_auth.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_http_meta_tables_schema_auth","p0,auth,nonConcurrent") { + String suiteName = "test_http_meta_tables_schema_auth" + String dbName = context.config.getDbNameByFile(context.file) + String tableName = "${suiteName}_table" + String user = "${suiteName}_user" + String pwd = 'C123_567p' + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """drop table if exists `${tableName}`""" + sql """ + CREATE TABLE `${tableName}` ( + `k1` int, + `k2` int + ) ENGINE=OLAP + DISTRIBUTED BY random BUCKETS auto + PROPERTIES ('replication_num' = '1') ; + """ + + try { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "true"); """ + def getSchema = { check_func -> + httpTest { + basicAuthorization "${user}","${pwd}" + endpoint "${context.config.feHttpAddress}" + uri "/rest/v2/api/meta/namespaces/default_cluster/databases/${dbName}/tables/${tableName}/schema" + op "get" + check check_func + } + } + + getSchema.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("401")) + } + + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + + getSchema.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("${tableName}")) + } + + sql """drop table if exists `${tableName}`""" + try_sql("DROP USER ${user}") + } finally { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "false"); """ + } +} diff --git a/regression-test/suites/auth_p0/test_http_table_count_auth.groovy b/regression-test/suites/auth_p0/test_http_table_count_auth.groovy new file mode 100644 index 00000000000000..2cf222b1f587a2 --- /dev/null +++ b/regression-test/suites/auth_p0/test_http_table_count_auth.groovy @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_http_table_count_auth","p0,auth,nonConcurrent") { + String suiteName = "test_http_table_count_auth" + String dbName = context.config.getDbNameByFile(context.file) + String tableName = "${suiteName}_table" + String user = "${suiteName}_user" + String pwd = 'C123_567p' + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """drop table if exists `${tableName}`""" + sql """ + CREATE TABLE `${tableName}` ( + `k1` int, + `k2` int + ) ENGINE=OLAP + DISTRIBUTED BY random BUCKETS auto + PROPERTIES ('replication_num' = '1') ; + """ + sql """insert into ${tableName} values(1,1)""" + try { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "true"); """ + def getCount = { check_func -> + httpTest { + basicAuthorization "${user}","${pwd}" + endpoint "${context.config.feHttpAddress}" + uri "/api/${dbName}/${tableName}/_count" + op "get" + check check_func + } + } + + getCount.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("401")) + } + + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + + getCount.call() { + respCode, body -> + log.info("body:${body}") + assertFalse("${body}".contains("401")) + } + + sql """drop table if exists `${tableName}`""" + try_sql("DROP USER ${user}") + } finally { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "false"); """ + } +} diff --git a/regression-test/suites/auth_p0/test_http_table_data_auth.groovy b/regression-test/suites/auth_p0/test_http_table_data_auth.groovy new file mode 100644 index 00000000000000..3a773894a5659a --- /dev/null +++ b/regression-test/suites/auth_p0/test_http_table_data_auth.groovy @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_http_table_data_auth","p0,auth,nonConcurrent") { + String suiteName = "test_http_table_data_auth" + String dbName = context.config.getDbNameByFile(context.file) + String tableName = "${suiteName}_table" + String user = "${suiteName}_user" + String pwd = 'C123_567p' + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """drop table if exists `${tableName}`""" + sql """ + CREATE TABLE `${tableName}` ( + `k1` int, + `k2` int + ) ENGINE=OLAP + DISTRIBUTED BY random BUCKETS auto + PROPERTIES ('replication_num' = '1') ; + """ + sql """insert into ${tableName} values(1,1)""" + try { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "true"); """ + def getTableData = { check_func -> + httpTest { + basicAuthorization "${user}","${pwd}" + endpoint "${context.config.feHttpAddress}" + uri "/api/show_table_data?db=${dbName}&table=${tableName}" + op "get" + check check_func + } + } + + def getDbData = { check_func -> + httpTest { + basicAuthorization "${user}","${pwd}" + endpoint "${context.config.feHttpAddress}" + uri "/api/show_table_data?db=${dbName}" + op "get" + check check_func + } + } + + getTableData.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("401")) + } + + getDbData.call() { + respCode, body -> + log.info("body:${body}") + assertFalse("${body}".contains("${tableName}")) + } + + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + + getTableData.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("${tableName}")) + } + + getDbData.call() { + respCode, body -> + log.info("body:${body}") + assertTrue("${body}".contains("${tableName}")) + } + + sql """drop table if exists `${tableName}`""" + try_sql("DROP USER ${user}") + } finally { + sql """ ADMIN SET ALL FRONTENDS CONFIG ("enable_all_http_auth" = "false"); """ + } +} diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy new file mode 100644 index 00000000000000..3487c93b0d6572 --- /dev/null +++ b/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_backup_restore_atomic_cancel") { + String suiteName = "test_backup_restore_atomic_cancelled" + String repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + String dbName = "${suiteName}_db" + String tableName = "${suiteName}_table" + String tableName1 = "${suiteName}_table_1" + String viewName = "${suiteName}_view" + String snapshotName = "${suiteName}_snapshot" + + def syncer = getSyncer() + syncer.createS3Repository(repoName) + + sql "CREATE DATABASE IF NOT EXISTS ${dbName}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + sql """ + CREATE TABLE ${dbName}.${tableName} ( + `id` LARGEINT NOT NULL, + `count` LARGEINT SUM DEFAULT "0") + AGGREGATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES + ( + "replication_num" = "1" + ) + """ + sql "DROP TABLE IF EXISTS ${dbName}.${tableName1}" + sql """ + CREATE TABLE ${dbName}.${tableName1} ( + `id` LARGEINT NOT NULL, + `count` LARGEINT SUM DEFAULT "0") + AGGREGATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES + ( + "replication_num" = "1" + ) + """ + sql "DROP VIEW IF EXISTS ${dbName}.${viewName}" + sql """ + CREATE VIEW ${dbName}.${viewName} + AS + SELECT id, count FROM ${dbName}.${tableName} + WHERE id > 5 + """ + + List values = [] + for (int i = 1; i <= 10; ++i) { + values.add("(${i}, ${i})") + } + sql "INSERT INTO ${dbName}.${tableName} VALUES ${values.join(",")}" + def result = sql "SELECT * FROM ${dbName}.${tableName}" + assertEquals(result.size(), values.size()); + + sql "INSERT INTO ${dbName}.${tableName1} VALUES ${values.join(",")}" + result = sql "SELECT * FROM ${dbName}.${tableName1}" + assertEquals(result.size(), values.size()); + + + sql """ + BACKUP SNAPSHOT ${dbName}.${snapshotName} + TO `${repoName}` + """ + + syncer.waitSnapshotFinish(dbName) + + // alter view and restore, it must failed because the signatures are not matched + + sql """ + ALTER VIEW ${dbName}.${viewName} + AS + SELECT id,count FROM ${dbName}.${tableName} + WHERE id < 100 + + """ + + sql "INSERT INTO ${dbName}.${tableName} VALUES (11, 11)" + + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + sql """ + RESTORE SNAPSHOT ${dbName}.${snapshotName} + FROM `${repoName}` + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "reserve_replica" = "true", + "atomic_restore" = "true" + ) + """ + + syncer.waitAllRestoreFinish(dbName) + + def restore_result = sql_return_maparray """ SHOW RESTORE FROM ${dbName} WHERE Label ="${snapshotName}" """ + restore_result.last() + logger.info("show restore result: ${restore_result}") + assertTrue(restore_result.last().State == "CANCELLED") + + + // Do not affect any tables. + result = sql "SELECT * FROM ${dbName}.${tableName}" + assertEquals(result.size(), values.size() + 1); + + result = sql "SELECT * FROM ${dbName}.${tableName1}" + assertEquals(result.size(), values.size()); + + sql "DROP TABLE ${dbName}.${tableName} FORCE" + sql "DROP TABLE ${dbName}.${tableName1} FORCE" + sql "DROP DATABASE ${dbName} FORCE" + sql "DROP REPOSITORY `${repoName}`" +} + + diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic_with_alter.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic_with_alter.groovy new file mode 100644 index 00000000000000..46a3ca5b29dbf2 --- /dev/null +++ b/regression-test/suites/backup_restore/test_backup_restore_atomic_with_alter.groovy @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_backup_restore_atomic_with_alter", "backup_restore") { + if (!getFeConfig("enable_debug_points").equals("true")) { + logger.info("Config.enable_debug_points=true is required") + return + } + + String suiteName = "test_backup_restore_atomic_with_alter" + String dbName = "${suiteName}_db" + String repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + String snapshotName = "snapshot_" + UUID.randomUUID().toString().replace("-", "") + String tableNamePrefix = "${suiteName}_tables" + + def syncer = getSyncer() + syncer.createS3Repository(repoName) + sql "DROP DATABASE IF EXISTS ${dbName} FORCE" + sql "CREATE DATABASE ${dbName}" + + // during restoring, if: + // 1. table_0 not exists, create table_0 is not allowed + // 2. table_1 exists, alter operation is not allowed + // 3. table_1 exists, drop table is not allowed + // 4. table_0 not exists, rename table_2 to table_0 is not allowed + int numTables = 3; + List tables = [] + for (int i = 0; i < numTables; ++i) { + String tableName = "${tableNamePrefix}_${i}" + tables.add(tableName) + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + sql """ + CREATE TABLE ${dbName}.${tableName} ( + `id` LARGEINT NOT NULL, + `count` LARGEINT SUM DEFAULT "0" + ) + AGGREGATE KEY(`id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("10"), + PARTITION p2 VALUES LESS THAN ("20"), + PARTITION p3 VALUES LESS THAN ("30"), + PARTITION p4 VALUES LESS THAN ("40"), + PARTITION p5 VALUES LESS THAN ("50"), + PARTITION p6 VALUES LESS THAN ("60"), + PARTITION p7 VALUES LESS THAN ("120") + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES + ( + "replication_num" = "1" + ) + """ + } + + int numRows = 10; + List values = [] + for (int j = 1; j <= numRows; ++j) { + values.add("(${j}0, ${j}0)") + } + + sql "INSERT INTO ${dbName}.${tableNamePrefix}_0 VALUES ${values.join(",")}" + sql "INSERT INTO ${dbName}.${tableNamePrefix}_1 VALUES ${values.join(",")}" + sql "INSERT INTO ${dbName}.${tableNamePrefix}_2 VALUES ${values.join(",")}" + + // only backup table 0,1 + sql """ + BACKUP SNAPSHOT ${dbName}.${snapshotName} + TO `${repoName}` + ON ( + ${tableNamePrefix}_0, + ${tableNamePrefix}_1 + ) + """ + + syncer.waitSnapshotFinish(dbName) + + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + assertTrue(snapshot != null) + + // drop table_0 + sql "DROP TABLE ${dbName}.${tableNamePrefix}_0 FORCE" + + // disable restore + GetDebugPoint().enableDebugPointForAllFEs("FE.PAUSE_NON_PENDING_RESTORE_JOB", [value:snapshotName]) + + sql """ + RESTORE SNAPSHOT ${dbName}.${snapshotName} + FROM `${repoName}` + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "reserve_replica" = "true", + "atomic_restore" = "true" + ) + """ + + boolean restore_paused = false + for (int k = 0; k < 60; k++) { + def records = sql_return_maparray """ SHOW 
RESTORE FROM ${dbName} WHERE Label = "${snapshotName}" """ + if (records.size() == 1 && records[0].State != 'PENDING') { + restore_paused = true + break + } + logger.info("SHOW RESTORE result: ${records}") + sleep(3000) + } + assertTrue(restore_paused) + + // 0. table_1 has in_atomic_restore property + def show_result = sql """ SHOW CREATE TABLE ${dbName}.${tableNamePrefix}_1 """ + logger.info("SHOW CREATE TABLE ${tableNamePrefix}_1: ${show_result}") + assertTrue(show_result[0][1].contains("in_atomic_restore")) + + // 1. create a restoring table (not exists before) + expectExceptionLike({ -> + sql """ + CREATE TABLE ${dbName}.${tableNamePrefix}_0 + ( + `id` LARGEINT NOT NULL, + `count` LARGEINT SUM DEFAULT "0" + ) + AGGREGATE KEY(`id`) + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES LESS THAN ("10"), + PARTITION p2 VALUES LESS THAN ("20"), + PARTITION p3 VALUES LESS THAN ("30"), + PARTITION p4 VALUES LESS THAN ("40"), + PARTITION p5 VALUES LESS THAN ("50"), + PARTITION p6 VALUES LESS THAN ("60"), + PARTITION p7 VALUES LESS THAN ("120") + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES + ( + "replication_num" = "1" + ) + """ + }, "is in atomic restore, please cancel the restore operation firstly") + + // 2. alter is not allowed + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + ADD PARTITION p8 VALUES LESS THAN("200") + """ + }, "Do not allow doing ALTER ops") + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + DROP PARTITION p1 + """ + }, "Do not allow doing ALTER ops") + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + MODIFY PARTITION p1 SET ("key"="value") + """ + }, "Do not allow doing ALTER ops") + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + ADD COLUMN new_col INT DEFAULT "0" AFTER count + """ + }, "Do not allow doing ALTER ops") + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + DROP COLUMN count + """ + }, "Do not allow doing ALTER ops") + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + SET ("is_being_synced"="false") + """ + }, "Do not allow doing ALTER ops") + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_1 + RENAME newTableName + """ + }, "Do not allow doing ALTER ops") + // BTW, the tmp table also don't allow rename + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.__doris_atomic_restore_prefix__${tableNamePrefix}_1 + RENAME newTableName + """ + }, "Do not allow doing ALTER ops") + // 3. drop table is not allowed + expectExceptionLike({ + sql """ + DROP TABLE ${dbName}.${tableNamePrefix}_1 + """ + }, "state is in atomic restore") + expectExceptionLike({ + sql """ + DROP TABLE ${dbName}.__doris_atomic_restore_prefix__${tableNamePrefix}_1 + """ + }, "state is RESTORE") + // 4. the table name is occupied + expectExceptionLike({ + sql """ + ALTER TABLE ${dbName}.${tableNamePrefix}_2 + RENAME ${tableNamePrefix}_0 + """ + }, "is already used (in restoring)") + + + sql "CANCEL RESTORE FROM ${dbName}" + + // 5. The restore job is cancelled, the in_atomic_restore property has been removed. 
+ show_result = sql """ SHOW CREATE TABLE ${dbName}.${tableNamePrefix}_1 """ + logger.info("SHOW CREATE TABLE ${tableNamePrefix}_1: ${show_result}") + assertFalse(show_result[0][1].contains("in_atomic_restore")) + + for (def tableName in tables) { + sql "DROP TABLE IF EXISTS ${dbName}.${tableName} FORCE" + } + sql "DROP DATABASE ${dbName} FORCE" + sql "DROP REPOSITORY `${repoName}`" +} + + + diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy new file mode 100644 index 00000000000000..9d090281364245 --- /dev/null +++ b/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_backup_restore_atomic_with_view", "backup_restore") { + String suiteName = "backup_restore_atomic_with_view" + String dbName = "${suiteName}_db" + String dbName1 = "${suiteName}_db_1" + String repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + String snapshotName = "${suiteName}_snapshot" + String tableName = "${suiteName}_table" + String viewName = "${suiteName}_view" + + def syncer = getSyncer() + syncer.createS3Repository(repoName) + sql "CREATE DATABASE IF NOT EXISTS ${dbName}" + sql "CREATE DATABASE IF NOT EXISTS ${dbName1}" + + int numRows = 10; + sql "DROP TABLE IF EXISTS ${dbName}.${tableName} FORCE" + sql "DROP VIEW IF EXISTS ${dbName}.${viewName}" + sql """ + CREATE TABLE ${dbName}.${tableName} ( + `id` LARGEINT NOT NULL, + `count` LARGEINT SUM DEFAULT "0" + ) + AGGREGATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES + ( + "replication_num" = "1" + ) + """ + List values = [] + for (int j = 1; j <= numRows; ++j) { + values.add("(${j}, ${j})") + } + sql "INSERT INTO ${dbName}.${tableName} VALUES ${values.join(",")}" + + sql """CREATE VIEW ${dbName}.${viewName} (id, count) + AS + SELECT * FROM ${dbName}.${tableName} WHERE count > 5 + """ + + qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id ASC" + qt_sql "SELECT * FROM ${dbName}.${viewName} ORDER BY id ASC" + + sql """ + BACKUP SNAPSHOT ${dbName}.${snapshotName} + TO `${repoName}` + """ + + syncer.waitSnapshotFinish(dbName) + + def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) + assertTrue(snapshot != null) + + // restore new view + sql "DROP TABLE IF EXISTS ${dbName1}.${tableName} FORCE" + sql "DROP VIEW IF EXISTS ${dbName1}.${viewName}" + + sql """ + RESTORE SNAPSHOT ${dbName1}.${snapshotName} + FROM `${repoName}` + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "atomic_restore" = "true", + "reserve_replica" = "true" + ) + """ + + syncer.waitAllRestoreFinish(dbName1) + + qt_sql "SELECT * FROM ${dbName1}.${tableName} 
ORDER BY id ASC" + qt_sql "SELECT * FROM ${dbName1}.${viewName} ORDER BY id ASC" + def show_view_result = sql_return_maparray "SHOW VIEW FROM ${tableName} FROM ${dbName1}" + logger.info("show view result: ${show_view_result}") + assertTrue(show_view_result.size() == 1); + def show_view = show_view_result[0]['Create View'] + assertTrue(show_view.contains("${dbName1}")) + assertTrue(show_view.contains("${tableName}")) + + // restore an exists view + sql """ + RESTORE SNAPSHOT ${dbName}.${snapshotName} + FROM `${repoName}` + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "atomic_restore" = "true", + "reserve_replica" = "true" + ) + """ + + syncer.waitAllRestoreFinish(dbName) + def restore_result = sql_return_maparray """ SHOW RESTORE FROM ${dbName} WHERE Label ="${snapshotName}" """ + restore_result.last() + logger.info("show restore result: ${restore_result}") + assertTrue(restore_result.last().State == "FINISHED") + + // View could read the incremental data. + sql "INSERT INTO ${dbName}.${tableName} VALUES (11, 11)" + + qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id ASC" + qt_sql "SELECT * FROM ${dbName}.${viewName} ORDER BY id ASC" + + sql "DROP REPOSITORY `${repoName}`" +} + + diff --git a/regression-test/suites/backup_restore/test_backup_restore_clean_restore.groovy b/regression-test/suites/backup_restore/test_backup_restore_clean_restore.groovy index 3b27a32b69d592..88e07f9e769975 100644 --- a/regression-test/suites/backup_restore/test_backup_restore_clean_restore.groovy +++ b/regression-test/suites/backup_restore/test_backup_restore_clean_restore.groovy @@ -77,6 +77,7 @@ suite("test_backup_restore_clean_restore", "backup_restore") { ) """ + sql "INSERT INTO ${dbName}.${tableName2} VALUES ${values.join(",")}" result = sql "SELECT * FROM ${dbName}.${tableName2}" assertEquals(result.size(), numRows); @@ -106,6 +107,25 @@ suite("test_backup_restore_clean_restore", "backup_restore") { result = sql "SELECT * FROM ${dbName}.${tableName3}" assertEquals(result.size(), numRows); + // view 1 must exists + String viewName1 = "${tableNamePrefix}_4" + sql "DROP VIEW IF EXISTS ${dbName}.${viewName1}" + sql """ + CREATE VIEW ${dbName}.${viewName1} (k1, k2) + AS + SELECT id as k1, count as k2 FROM ${dbName}.${tableName1} + WHERE id in (1,3,5,7,9) + """ + + // view 2 will be deleted + String viewName2 = "${tableNamePrefix}_5" + sql "DROP VIEW IF EXISTS ${dbName}.${viewName2}" + sql """ + CREATE VIEW ${dbName}.${viewName2} (k1, k2) + AS + SELECT id as k1, count as k2 FROM ${dbName}.${tableName3} + WHERE id in (1,3,5,7,9) + """ sql """ BACKUP SNAPSHOT ${dbName}.${snapshotName} @@ -117,13 +137,14 @@ suite("test_backup_restore_clean_restore", "backup_restore") { def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName) assertTrue(snapshot != null) - // restore table1, partition 3 of table2 + // restore table1, partition 3 of table2, view1 sql """ RESTORE SNAPSHOT ${dbName}.${snapshotName} FROM `${repoName}` ON ( `${tableName1}`, - `${tableName2}` PARTITION (`p3`) + `${tableName2}` PARTITION (`p3`), + `${viewName1}` ) PROPERTIES ( @@ -144,12 +165,23 @@ suite("test_backup_restore_clean_restore", "backup_restore") { result = sql "SELECT * FROM ${dbName}.${tableName2}" assertEquals(result.size(), numRows-10) + // view1 are exists + result = sql """ SHOW VIEW FROM ${tableName1} FROM ${dbName} """ + assertEquals(result.size(), 1) + + // view2 are dropped + result = sql """ + SHOW TABLE STATUS FROM ${dbName} LIKE "${viewName2}" + """ + assertEquals(result.size(), 0) + // table3 are 
dropped result = sql """ SHOW TABLE STATUS FROM ${dbName} LIKE "${tableName3}" """ assertEquals(result.size(), 0) + sql "DROP VIEW ${dbName}.${viewName1}" sql "DROP TABLE ${dbName}.${tableName1} FORCE" sql "DROP TABLE ${dbName}.${tableName2} FORCE" sql "DROP DATABASE ${dbName} FORCE" diff --git a/regression-test/suites/backup_restore/test_backup_restore_with_view.groovy b/regression-test/suites/backup_restore/test_backup_restore_with_view.groovy index be7769953230db..10b21bb3442082 100644 --- a/regression-test/suites/backup_restore/test_backup_restore_with_view.groovy +++ b/regression-test/suites/backup_restore/test_backup_restore_with_view.groovy @@ -109,6 +109,23 @@ suite("test_backup_restore_with_view", "backup_restore") { logger.info("show restore result: ${restore_result}") assertTrue(restore_result.last().State == "FINISHED") + // restore to db1, test the view signature. + sql """ + RESTORE SNAPSHOT ${dbName1}.${snapshotName} + FROM `${repoName}` + PROPERTIES + ( + "backup_timestamp" = "${snapshot}", + "reserve_replica" = "true" + ) + """ + + syncer.waitAllRestoreFinish(dbName1) + restore_result = sql_return_maparray """ SHOW RESTORE FROM ${dbName1} WHERE Label ="${snapshotName}" """ + restore_result.last() + logger.info("show restore result: ${restore_result}") + assertTrue(restore_result.last().State == "FINISHED") + sql "DROP TABLE ${dbName}.${tableName} FORCE" sql "DROP VIEW ${dbName}.${viewName}" sql "DROP DATABASE ${dbName} FORCE" diff --git a/regression-test/suites/catalog_recycle_bin_p0/test_insert_overwrite_recover.groovy b/regression-test/suites/catalog_recycle_bin_p0/test_insert_overwrite_recover.groovy new file mode 100644 index 00000000000000..71faaf849ba44a --- /dev/null +++ b/regression-test/suites/catalog_recycle_bin_p0/test_insert_overwrite_recover.groovy @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
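+// This suite writes rows with INSERT and INSERT OVERWRITE into a range-partitioned table,
+// force-drops partitions p3/p4/p5, recovers them with RECOVER PARTITION, and verifies the
+// data is still readable after recovery.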
+ +suite("test_insert_overwrite_recover") { + def table = "test_insert_overwrite_recover" + + // create table and insert data + sql """ drop table if exists ${table} force""" + sql """ + create table ${table} ( + `id` int(11), + `name` varchar(128), + `da` date + ) + engine=olap + duplicate key(id) + partition by range(da)( + PARTITION p3 VALUES LESS THAN ('2023-01-01'), + PARTITION p4 VALUES LESS THAN ('2024-01-01'), + PARTITION p5 VALUES LESS THAN ('2025-01-01') + ) + distributed by hash(id) buckets 2 + properties( + "replication_num"="1", + "light_schema_change"="true" + ); + """ + + sql """ insert into ${table} values(1, 'a', '2022-01-02'); """ + sql """ insert into ${table} values(2, 'a', '2023-01-02'); """ + sql """ insert into ${table} values(3, 'a', '2024-01-02'); """ + sql """ SYNC;""" + + qt_select_check_1 """ select * from ${table} order by id,name,da; """ + + sql """ insert overwrite table ${table} values(3, 'a', '2024-01-02'); """ + + qt_select_check_1 """ select * from ${table} order by id,name,da; """ + + sql """ ALTER TABLE ${table} DROP PARTITION p3 force; """ + sql """ ALTER TABLE ${table} DROP PARTITION p4 force; """ + sql """ ALTER TABLE ${table} DROP PARTITION p5 force; """ + + sql """ recover partition p3 from ${table}; """ + sql """ recover partition p4 from ${table}; """ + sql """ recover partition p5 from ${table}; """ + + qt_select_check_1 """ select * from ${table} order by id,name,da; """ + +} diff --git a/regression-test/suites/check_before_quit/check_before_quit.groovy b/regression-test/suites/check_before_quit/check_before_quit.groovy index 23850a7c8a59f8..7b097b58cd23c6 100644 --- a/regression-test/suites/check_before_quit/check_before_quit.groovy +++ b/regression-test/suites/check_before_quit/check_before_quit.groovy @@ -126,6 +126,7 @@ suite("check_before_quit", "nonConcurrent,p0") { def command_metrics = "curl http://${beHost}:${bePort}/metrics" def command_vars = "curl http://${beHost}:${beBrpcPort}/vars" + def command_load_channels = "curl http://${beHost}:${bePort}/api/load_channels" def command_load_streams = "curl http://${beHost}:${bePort}/api/load_streams" while ((System.currentTimeMillis() - beginTime) < timeoutMs) { clear = true @@ -214,6 +215,16 @@ suite("check_before_quit", "nonConcurrent,p0") { break } + logger.info("executing command: ${command_load_channels}") + def process_load_channels = command_load_channels.execute() + def outputStream_load_channels = new StringBuffer() + def errorStream_load_channels = new StringBuffer() + process_load_channels.consumeProcessOutput(outputStream_load_channels, errorStream_load_channels) + def code_load_channels = process_load_channels.waitFor() + def load_channels = outputStream_load_channels.toString() + logger.info("Request BE load_channels: code=" + code_load_channels + ", err=" + errorStream_load_channels.toString()) + logger.info("load_channels: " + load_channels); + logger.info("executing command: ${command_load_streams}") def process_load_streams = command_load_streams.execute() def outputStream_load_streams = new StringBuffer() diff --git a/regression-test/suites/cloud_p0/auth/test_grant_revoke_cluster_to_user.groovy b/regression-test/suites/cloud_p0/auth/test_grant_revoke_cluster_to_user.groovy index ab9660c5891337..9cd752bdff5547 100644 --- a/regression-test/suites/cloud_p0/auth/test_grant_revoke_cluster_to_user.groovy +++ b/regression-test/suites/cloud_p0/auth/test_grant_revoke_cluster_to_user.groovy @@ -107,7 +107,7 @@ suite("test_grant_revoke_cluster_to_user", "cloud_auth") { connect(user = 
"${user3}", password = 'Cloud12345', url = context.config.jdbcUrl) { test { sql """select * from ${db}.${tbl}""" - exception "or you may not have permission to access the current cluster" + exception "or you may not have permission to access the current compute group" } } @@ -135,7 +135,7 @@ suite("test_grant_revoke_cluster_to_user", "cloud_auth") { connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { test { sql """use @${cluster1}""" - exception "Cluster ${cluster1} not exist" + exception "${cluster1} not exist" } result = sql_return_maparray """show grants for '${user1}'""" commonAuth result, "'${user1}'@'%'", "Yes", "admin", "Admin_priv" diff --git a/regression-test/suites/cloud_p0/auth/test_grant_revoke_compute_group_to_user.groovy b/regression-test/suites/cloud_p0/auth/test_grant_revoke_compute_group_to_user.groovy new file mode 100644 index 00000000000000..a57b9e9a07f427 --- /dev/null +++ b/regression-test/suites/cloud_p0/auth/test_grant_revoke_compute_group_to_user.groovy @@ -0,0 +1,299 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_grant_revoke_compute_group_to_user", "cloud_auth") { + if (!isCloudMode()) { + log.info("not cloud mode just return") + return + } + def role = "admin" + def user1 = "regression_test_compute_group_user1" + def user2 = "regression_test_compute_group_user2" + def user3 = "regression_test_compute_group_user3" + def tbl = "test_auth_compute_group_tbl" + + def logAndExecuteSql = { sqlStatement -> + log.info("Executing SQL: ${sqlStatement}") + return sql(sqlStatement) + } + + logAndExecuteSql """drop user if exists ${user1}""" + logAndExecuteSql """drop user if exists ${user2}""" + logAndExecuteSql """drop user if exists ${user3}""" + logAndExecuteSql """drop table if exists ${tbl}""" + + def getCluster = { group -> + def result = sql " SHOW COMPUTE GROUPS; " + for (int i = 0; i < result.size(); i++) { + if (result[i][0] == group) { + return result[i] + } + } + return null + } + + def commonAuth = { result, UserIdentity, Password, Roles, GlobalPrivs -> + assertEquals(UserIdentity as String, result.UserIdentity[0] as String) + assertEquals(Password as String, result.Password[0] as String) + assertEquals(Roles as String, result.Roles[0] as String) + assertEquals(GlobalPrivs as String, result.GlobalPrivs[0] as String) + } + + def getProperty = { property, user -> + def result = null + if (user == "") { + result = sql_return_maparray """SHOW PROPERTY""" + } else { + result = sql_return_maparray """SHOW PROPERTY FOR '${user}'""" + } + result.find { + it.Key == property as String + } + } + + def groups = sql " SHOW COMPUTE GROUPS; " + logger.info("compute groups {}", groups); + assertTrue(!groups.isEmpty()) + def validCluster = groups[0][0] + + // 1. 
change user + // ${user1} admin role + logAndExecuteSql """create user ${user1} identified by 'Cloud12345' default role 'admin'""" + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertNull(result.ComputeGroupPrivs[0]) + + + // ${user2} not admin role + logAndExecuteSql """create user ${user2} identified by 'Cloud12345'""" + logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${validCluster}' TO '${user2}'""" + // for use default_group:regression_test + logAndExecuteSql """grant select_priv on *.*.* to ${user2}""" + + + logAndExecuteSql """ + CREATE TABLE ${tbl} ( + `k1` int(11) NULL, + `k2` char(5) NULL + ) + DUPLICATE KEY(`k1`, `k2`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`k1`) BUCKETS 1 + PROPERTIES ( + "replication_num"="1" + ); + """ + + logAndExecuteSql """ + insert into ${tbl} (k1, k2) values (1, "10"); + """ + + logAndExecuteSql """create user ${user3} identified by 'Cloud12345'""" + logAndExecuteSql """GRANT SELECT_PRIV ON *.*.* TO '${user3}'@'%'""" + result = connect(user = "${user3}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """SHOW COMPUTE GROUPS""" + } + // not grant any group to user3 + assertTrue(result.isEmpty()) + def db = context.dbName + + connect(user = "${user3}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """select * from ${db}.${tbl}""" + exception "or you may not have permission to access the current compute group" + } + } + + // 2. grant group + def group1 = "groupA" + def result + + logAndExecuteSql "sync" + + // admin role user can grant group to use + result = connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + } + + // case run user(default root), and show grant again, should be same result + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${group1}""" + exception "${group1} not exist" + } + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'", "Yes", "admin", "Admin_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + } + + + logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user2}'""" + try { + result = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Access denied; you need all [Grant_priv, Cluster_usage_priv] privilege(s) for this operation"), e.getMessage()) + } + logAndExecuteSql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user2}'""" + + // default compute group + logAndExecuteSql """SET PROPERTY FOR '${user1}' 'default_compute_group' = '${validCluster}'""" + logAndExecuteSql """SET PROPERTY FOR 
'${user2}' 'default_compute_group' = '${validCluster}'""" + def show_group_1 = getCluster(validCluster) + + assertTrue(show_group_1[2].contains(user2), "Expect contain users ${user2}") + + result = getProperty("default_compute_group", "${user1}") + assertEquals(result.Value as String, "${validCluster}" as String) + + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + result = sql """use @${validCluster}""" + assertEquals(result[0][0], 0) + result = getProperty("default_compute_group", "") + assertEquals(result.Value as String, "${validCluster}" as String) + } + // set default_compute_group to '' + logAndExecuteSql """SET PROPERTY FOR '${user2}' 'default_compute_group' = ''""" + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + result = getProperty("default_compute_group", "") + assertEquals(result.Value as String, "" as String) + } + + logAndExecuteSql """SET PROPERTY FOR '${user2}' 'default_compute_group' = '${validCluster}'""" + result = logAndExecuteSql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${validCluster}' FROM '${user2}'""" + assertEquals(result[0][0], 0) + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${group1}""" + exception "USAGE denied to user" + } + } + + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${validCluster}""" + exception "USAGE denied to user" + } + } + + logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user2}'""" + logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${validCluster}' TO '${user2}'""" + show_group_2 = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + getCluster(validCluster) + } + + assertTrue(show_group_2[2].equals(user2), "Expect just only have user ${user2}") + + result = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """USE @${validCluster}""" + } + assertEquals(result[0][0], 0) + + logAndExecuteSql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${validCluster}' FROM '${user2}'""" + + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${validCluster}""" + exception "USAGE denied to user" + } + result = sql_return_maparray """show grants for '${user2}'""" + commonAuth result, "'${user2}'@'%'" as String, "Yes", "", "Select_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + test { + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP 'NotExistCluster' FROM '${user2}'""" + exception "Access denied; you need all" + } + } + + logAndExecuteSql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${validCluster}' FROM '${user2}'""" + result = sql_return_maparray """show grants for '${user2}'""" + commonAuth result, "'${user2}'@'%'" as String, "Yes", "", "Select_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + logAndExecuteSql "sync" + // 3. revoke group + // admin role user can revoke group + result = connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user1}'""" + } + + // revoke GRANT_PRIV from general user, he can not revoke group to other user. 
+ + logAndExecuteSql """revoke GRANT_PRIV on *.*.* from ${user2}""" + + logAndExecuteSql "sync" + + // a general user can't revoke a compute group + try { + result = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user2}'""" + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Access denied; you need all"), e.getMessage()) + } + + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertNull(result.ComputeGroupPrivs[0]) + + result = sql_return_maparray """show grants for '${user2}'""" + commonAuth result, "'${user2}'@'%'" as String, "Yes", "", "Select_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + // revoke user1 admin role + logAndExecuteSql """REVOKE 'admin' FROM ${user1}""" + result = sql_return_maparray """show grants for '${user1}'""" + assertEquals("'${user1}'@'%'" as String, result.UserIdentity[0] as String) + assertEquals("", result.Roles[0]) + assertNull(result.GlobalPrivs[0]) + assertNull(result.ComputeGroupPrivs[0]) + + // user1 no longer has the admin role, so setting another user's default compute group fails + try { + result = connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """SET PROPERTY FOR '${user2}' 'default_compute_group' = '${validCluster}'""" + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Access denied for user"), e.getMessage()) + } + + logAndExecuteSql """drop user if exists ${user1}""" + // grant to a user that does not exist + result = logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO 'NotExitUser'""" + assertEquals(result[0][0], 0) + + // drop the user, then grant and revoke the group priv for it + result = logAndExecuteSql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + assertEquals(result[0][0], 0) + result = logAndExecuteSql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user1}'""" + assertEquals(result[0][0], 0) + // general user can't grant group to user + logAndExecuteSql """drop user if exists ${user2}""" + logAndExecuteSql """drop user if exists ${user3}""" +} + + diff --git a/regression-test/suites/cloud_p0/cache/http/test_calc_cache_file_hash.groovy b/regression-test/suites/cloud_p0/cache/http/test_calc_cache_file_hash.groovy new file mode 100644 index 00000000000000..6d800c7c5f003b --- /dev/null +++ b/regression-test/suites/cloud_p0/cache/http/test_calc_cache_file_hash.groovy @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
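+// This suite calls the BE HTTP endpoint /api/file_cache?op=hash&value=<file name>
+// on the single backend of regression_cluster_name1 and checks the returned hash value.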
+ +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_calc_cache_file_hash") { + sql """ use @regression_cluster_name1 """ + String[][] backends = sql """ show backends """ + String backendId; + def backendIdToBackendIP = [:] + def backendIdToBackendHttpPort = [:] + def backendIdToBackendBrpcPort = [:] + for (String[] backend in backends) { + if (backend[9].equals("true") && backend[19].contains("regression_cluster_name1")) { + backendIdToBackendIP.put(backend[0], backend[1]) + backendIdToBackendHttpPort.put(backend[0], backend[4]) + backendIdToBackendBrpcPort.put(backend[0], backend[5]) + } + } + assertEquals(backendIdToBackendIP.size(), 1) + + backendId = backendIdToBackendIP.keySet()[0] + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=hash&value=0200000000000001bf42c14374fff491ffb7c89a1a65c5bb_0.dat""" + logger.info("calc cache file hash URL:" + url) + def httpAction = { check_func -> + httpTest { + endpoint "" + uri url + op "get" + body "" + check check_func + } + } + + httpAction.call() { + respCode, body -> { + assertEquals(respCode, 200) + def map = parseJson(body) + assertEquals(map.get("hash"), "c6a599f453f67f0949f80ad9990fa3dd") + } + } +} diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy new file mode 100644 index 00000000000000..a086731efffce4 --- /dev/null +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy @@ -0,0 +1,265 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
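+// This suite loads the customer and supplier tables on compute group regression_cluster_name0,
+// warms up regression_cluster_name1 with "WARM UP COMPUTE GROUP ... WITH COMPUTE GROUP ...",
+// compares the ttl_cache_size metric of both BEs, and expects warm up statements that
+// reference a non-existent compute group to fail.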
+ +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_warm_up_compute_group") { + def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ + def getJobState = { jobId -> + def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ + return jobStateResult[0][2] + } + def table = "customer" + + List ipList = new ArrayList<>(); + List hbPortList = new ArrayList<>() + List httpPortList = new ArrayList<>() + List brpcPortList = new ArrayList<>() + List beUniqueIdList = new ArrayList<>() + + String[] bes = context.config.multiClusterBes.split(','); + println("the value is " + context.config.multiClusterBes); + int num = 0 + for(String values : bes) { + if (num++ == 2) break; + println("the value is " + values); + String[] beInfo = values.split(':'); + ipList.add(beInfo[0]); + hbPortList.add(beInfo[1]); + httpPortList.add(beInfo[2]); + beUniqueIdList.add(beInfo[3]); + brpcPortList.add(beInfo[4]); + } + + println("the ip is " + ipList); + println("the heartbeat port is " + hbPortList); + println("the http port is " + httpPortList); + println("the be unique id is " + beUniqueIdList); + println("the brpc port is " + brpcPortList); + + sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text + sql new File("""${context.file.parent}/../ddl/supplier_delete.sql""").text + // create table if not exists + sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties) + sql (new File("""${context.file.parent}/../ddl/supplier.sql""").text + ttlProperties) + + sql """ TRUNCATE TABLE __internal_schema.cloud_cache_hotspot; """ + sleep(30000) + + def s3BucketName = getS3BucketName() + def s3WithProperties = """WITH S3 ( + |"AWS_ACCESS_KEY" = "${getS3AK()}", + |"AWS_SECRET_KEY" = "${getS3SK()}", + |"AWS_ENDPOINT" = "${getS3Endpoint()}", + |"AWS_REGION" = "${getS3Region()}", + |"provider" = "${getS3Provider()}") + |PROPERTIES( + |"exec_mem_limit" = "8589934592", + |"load_parallelism" = "3")""".stripMargin() + + + + def clearFileCache = { ip, port -> + httpTest { + endpoint "" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" + } + } + + def getMetricsMethod = { ip, port, check_func -> + httpTest { + endpoint ip + ":" + port + uri "/brpc_metrics" + op "get" + check check_func + } + } + + clearFileCache.call(ipList[0], httpPortList[0]); + clearFileCache.call(ipList[1], httpPortList[1]); + + def load_customer_once = { + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + def loadLabel = table + "_" + uniqueID + // load data from cos + def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName) + loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties + sql loadSql + + // check load state + while (true) { + def stateResult = sql "show load where Label = '${loadLabel}'" + def loadState = stateResult[stateResult.size() - 1][2].toString() + if ("CANCELLED".equalsIgnoreCase(loadState)) { + throw new IllegalStateException("load ${loadLabel} failed.") + } else if ("FINISHED".equalsIgnoreCase(loadState)) { + break + } + sleep(5000) + } + } + + def load_supplier_once = { + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + def loadLabel = "supplier_" + uniqueID + // load data from cos + def loadSql = new File("""${context.file.parent}/../ddl/supplier_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName) + loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", 
loadLabel) + s3WithProperties + sql loadSql + + // check load state + while (true) { + def stateResult = sql "show load where Label = '${loadLabel}'" + def loadState = stateResult[stateResult.size() - 1][2].toString() + if ("CANCELLED".equalsIgnoreCase(loadState)) { + throw new IllegalStateException("load ${loadLabel} failed.") + } else if ("FINISHED".equalsIgnoreCase(loadState)) { + break + } + sleep(5000) + } + } + + sql "use @regression_cluster_name0" + load_customer_once() + load_customer_once() + load_customer_once() + load_customer_once() + load_customer_once() + load_supplier_once() + load_supplier_once() + load_supplier_once() + + for (int i = 0; i < 1000; i++) { + sql "select count(*) from customer" + sql "select count(*) from supplier" + } + sleep(40000) + def jobId_ = sql "WARM UP COMPUTE GROUP regression_cluster_name1 WITH COMPUTE GROUP regression_cluster_name0" + def waitJobDone = { jobId -> + int retryTime = 120 + int i = 0 + for (; i < retryTime; i++) { + sleep(1000) + def status = getJobState(jobId[0][0]) + logger.info(status) + if (status.equals("CANCELLED")) { + assertTrue(false); + } + if (status.equals("FINISHED")) { + break; + } + } + if (i == retryTime) { + sql "cancel warm up job where id = ${jobId[0][0]}" + assertTrue(false); + } + } + waitJobDone(jobId_) + + sleep(30000) + long ttl_cache_size = 0 + getMetricsMethod.call(ipList[0], brpcPortList[0]) { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag = false; + for (String line in strs) { + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + ttl_cache_size = line.substring(i).toLong() + flag = true + break + } + } + assertTrue(flag) + } + + getMetricsMethod.call(ipList[1], brpcPortList[1]) { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag = false; + for (String line in strs) { + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + assertEquals(ttl_cache_size, line.substring(i).toLong()) + flag = true + break + } + } + assertTrue(flag) + } + + try { + sql "WARM UP COMPUTE GROUP regression_cluster_name1 WITH COMPUTE GROUP regression_cluster_name2" + assertTrue(false) + } catch (Exception e) { + assertTrue(true) + } + + try { + sql "WARM UP COMPUTE GROUP regression_cluster_name2 WITH COMPUTE GROUP regression_cluster_name0" + assertTrue(false) + } catch (Exception e) { + assertTrue(true) + } + + try { + sql "WARM UP COMPUTE GROUP regression_cluster_name0 WITH COMPUTE GROUP regression_cluster_name0" + assertTrue(false) + } catch (Exception e) { + assertTrue(true) + } + + sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text + sql new File("""${context.file.parent}/../ddl/supplier_delete.sql""").text + + clearFileCache.call(ipList[1], httpPortList[1]); + jobId_ = sql "WARM UP COMPUTE GROUP regression_cluster_name1 WITH COMPUTE GROUP regression_cluster_name0" + waitJobDone(jobId_) + sleep(40000) + getMetricsMethod.call(ipList[1], brpcPortList[1]) { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag = false; + for (String line in strs) { + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def j = line.indexOf(' ') + assertEquals(0, line.substring(j).toLong()) 
+ flag = true + break + } + } + assertTrue(flag) + } +} diff --git a/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy b/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy index 2ce9a9d8f4b531..d9b105ec92d552 100644 --- a/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy +++ b/regression-test/suites/cloud_p0/multi_cluster/test_auto_start.groovy @@ -22,7 +22,7 @@ import org.awaitility.Awaitility; import org.apache.doris.regression.util.Http import static java.util.concurrent.TimeUnit.SECONDS; -suite('test_auto_start_in_cloud', 'multi_cluster') { +suite('test_auto_start_in_cloud', 'multi_cluster, docker') { if (!isCloudMode()) { return; } @@ -100,7 +100,7 @@ suite('test_auto_start_in_cloud', 'multi_cluster') { def jsonSlurper = new JsonSlurper() def jsonObject = jsonSlurper.parseText(tag) - String cloudClusterId = jsonObject.cloud_cluster_id + String cloudClusterId = jsonObject.compute_group_id String uniqueId = jsonObject.cloud_unique_id sleep(5 * 1000) @@ -130,7 +130,7 @@ suite('test_auto_start_in_cloud', 'multi_cluster') { tag = getCloudBeTagByName(clusterName) logger.info("tag = {}", tag) jsonObject = jsonSlurper.parseText(tag) - String cluster_status = jsonObject.cloud_cluster_status + String cluster_status = jsonObject.compute_group_status cluster_status == "SUSPENDED" } @@ -158,7 +158,7 @@ suite('test_auto_start_in_cloud', 'multi_cluster') { tag = getCloudBeTagByName(clusterName) logger.info("tag = {}", tag) jsonObject = jsonSlurper.parseText(tag) - String cluster_status = jsonObject.cloud_cluster_status + String cluster_status = jsonObject.compute_group_status cluster_status == "TO_RESUME" } sleep(5 * 1000) @@ -168,5 +168,29 @@ suite('test_auto_start_in_cloud', 'multi_cluster') { future1.get() future2.get() + + tag = getCloudBeTagByName(clusterName) + logger.info("tag check = {}", tag) + jsonObject = jsonSlurper.parseText(tag) + String cluster_status = jsonObject.compute_group_status + assertEquals("NORMAL", cluster_status) + + // add 1 nodes, check it status NORMAL + cluster.addBackend(1, null) + dockerAwaitUntil(5) { + result = sql """SHOW BACKENDS""" + result.size() == 4 + } + + def bes = sql_return_maparray "SHOW BACKENDS" + bes.each { + tag = it.Tag + if (!tag.contains(clusterName)) { + return + } + jsonObject = jsonSlurper.parseText(tag) + cluster_status = jsonObject.compute_group_status + assertEquals("NORMAL", cluster_status) + } } } diff --git a/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy b/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy index 90fd6656b8ffbb..68b89b6f3d661c 100644 --- a/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy +++ b/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy @@ -52,7 +52,7 @@ suite('test_tvf_in_cloud', 'multi_cluster,docker') { def jsonSlurper = new JsonSlurper() def jsonObject = jsonSlurper.parseText(tag) - def cloudClusterId = jsonObject.cloud_cluster_id + def cloudClusterId = jsonObject.compute_group_id // multi cluster env // current cluster diff --git a/regression-test/suites/cloud_p0/node_mgr/test_not_allowed_op.groovy b/regression-test/suites/cloud_p0/node_mgr/test_not_allowed_op.groovy new file mode 100644 index 00000000000000..0ad6fd4ccdc0a5 --- /dev/null +++ b/regression-test/suites/cloud_p0/node_mgr/test_not_allowed_op.groovy @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite('test_not_allowed_op', 'p0') { + if (!isCloudMode()) { + return; + } + + // Test modifying frontend is not allowed + try { + // Get current frontend information + def frontendResult = sql_return_maparray """SHOW FRONTENDS""" + logger.info("Current frontends: ${frontendResult}") + + // Extract the first frontend's information + def firstFrontend = frontendResult[0] + def frontendHost = firstFrontend['Host'] + def frontendEditLogPort = firstFrontend['EditLogPort'] + + // Construct the frontend address + def frontendAddress = "${frontendHost}:${frontendEditLogPort}" + logger.info("Attempting to modify frontend: ${frontendAddress}") + def result = sql """ ALTER SYSTEM MODIFY FRONTEND "${frontendAddress}" HOSTNAME 'localhost' """ + logger.info("Modify frontend result: ${result}") + throw new IllegalStateException("Expected exception was not thrown") + } catch (Exception e) { + assertTrue(e.getMessage().contains("Modifying frontend hostname is not supported in cloud mode")) + } + + // Get current backend information + def backendResult = sql_return_maparray """SHOW BACKENDS""" + logger.info("Current backends: ${backendResult}") + + // Extract the first backend's information + def firstBackend = backendResult[0] + def backendHost = firstBackend['Host'] + def backendHeartbeatPort = firstBackend['HeartbeatPort'] + + // Construct the backend address + def backendAddress = "${backendHost}:${backendHeartbeatPort}" + // Test modifying backend is not allowed + try { + logger.info("Attempting to modify backend: ${backendAddress}") + def result = sql """ ALTER SYSTEM MODIFY BACKEND '${backendAddress}' SET("tag.location" = "tag1") """ + logger.info("Modify backend result: ${result}") + throw new IllegalStateException("Expected exception was not thrown") + } catch (Exception e) { + logger.info("Caught expected exception: ${e.getMessage()}") + assertTrue(e.getMessage().contains("Modifying backends is not supported in cloud mode")) + } + + // Test modifying backend hostname is not allowed + try { + sql """ ALTER SYSTEM MODIFY BACKEND "${backendAddress}" HOSTNAME 'localhost' """ + throw new IllegalStateException("Expected exception was not thrown") + } catch (Exception e) { + assertTrue(e.getMessage().contains("Modifying backend hostname is not supported in cloud mode")) + } + + logger.info("All tests for disallowed operations in cloud mode passed successfully") +} \ No newline at end of file diff --git a/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy b/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy index c79219aeac2bb7..77f7d05ff299c0 100644 --- a/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy +++ b/regression-test/suites/cloud_p0/node_mgr/test_sql_mode_node_mgr.groovy @@ -26,6 +26,8 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { def 
clusterOptions = [ new ClusterOptions(), new ClusterOptions(), + new ClusterOptions(), + new ClusterOptions(), ] for (options in clusterOptions) { @@ -40,12 +42,22 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { "heartbeat_interval_second=1",] } - clusterOptions[0].beCloudInstanceId = true; + clusterOptions[0].sqlModeNodeMgr = true; + clusterOptions[0].beClusterId = true; clusterOptions[0].beMetaServiceEndpoint = true; - clusterOptions[1].beCloudInstanceId = false; + clusterOptions[1].sqlModeNodeMgr = true; + clusterOptions[1].beClusterId = false; clusterOptions[1].beMetaServiceEndpoint = false; + clusterOptions[2].sqlModeNodeMgr = false; + clusterOptions[2].beClusterId = true; + clusterOptions[2].beMetaServiceEndpoint = true; + + clusterOptions[3].sqlModeNodeMgr = false; + clusterOptions[3].beClusterId = false; + clusterOptions[3].beMetaServiceEndpoint = false; + for (options in clusterOptions) { docker(options) { logger.info("docker started"); @@ -145,8 +157,24 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { cluster.restartFrontends(); cluster.restartBackends(); - sleep(30000) - context.reconnectFe() + def reconnectFe = { + sleep(10000) + logger.info("Reconnecting to a new frontend...") + def newFe = cluster.getMasterFe() + if (newFe) { + logger.info("New frontend found: ${newFe.host}:${newFe.httpPort}") + def url = String.format( + "jdbc:mysql://%s:%s/?useLocalSessionState=true&allowLoadLocalInfile=false", + newFe.host, newFe.queryPort) + url = context.config.buildUrlWithDb(url, context.dbName) + context.connectTo(url, context.config.jdbcUser, context.config.jdbcPassword) + logger.info("Successfully reconnected to the new frontend") + } else { + logger.error("No new frontend found to reconnect") + } + } + + reconnectFe() checkClusterStatus(3, 3, 1) @@ -186,7 +214,7 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { // CASE 3. Add the dropped backend back logger.info("Adding back the dropped backend: {}:{}", backendHost, backendHeartbeatPort) - sql """ ALTER SYSTEM ADD BACKEND "${backendHost}:${backendHeartbeatPort}"; """ + sql """ ALTER SYSTEM ADD BACKEND "${backendHost}:${backendHeartbeatPort}" PROPERTIES ("tag.compute_group_name" = "another_compute_group"); """ // Wait for the backend to be fully added back maxWaitSeconds = 300 @@ -207,6 +235,30 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { checkClusterStatus(3, 3, 3) + // CASE 4. 
Check compute groups + logger.info("Checking compute groups") + + def computeGroups = sql_return_maparray("SHOW COMPUTE GROUPS") + logger.info("Compute groups: {}", computeGroups) + + // Verify that we have at least two compute groups + assert computeGroups.size() >= 2, "Expected at least 2 compute groups, but got ${computeGroups.size()}" + + // Verify that we have a 'default_compute_group' and 'another_compute_group' + def defaultGroup = computeGroups.find { it['IsCurrent'] == "TRUE" } + def anotherGroup = computeGroups.find { it['IsCurrent'] == "FALSE" } + + assert defaultGroup != null, "Expected to find 'default_compute_group'" + assert anotherGroup != null, "Expected to find 'another_compute_group'" + + // Verify that 'another_compute_group' has exactly one backend + assert anotherGroup['BackendNum'] == '1', "Expected 'another_compute_group' to have 1 backend, but it has ${anotherGroup['BackendNum']}" + + // Verify that 'default_compute_group' has the remaining backends + assert defaultGroup['BackendNum'] == '2', "Expected 'default_compute_group' to have 2 backends, but it has ${defaultGroup['BackendNum']}" + + logger.info("Compute groups verified successfully") + // CASE 4. If a fe is dropped, query and writing also work. // Get the list of frontends def frontends = sql_return_maparray("SHOW FRONTENDS") @@ -218,16 +270,18 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { def feHost = feToDropMap['Host'] def feEditLogPort = feToDropMap['EditLogPort'] + def feRole = feToDropMap['Role'] logger.info("Dropping non-master frontend: {}:{}", feHost, feEditLogPort) // Drop the selected non-master frontend - sql """ ALTER SYSTEM DROP FOLLOWER "${feHost}:${feEditLogPort}"; """ + sql """ ALTER SYSTEM DROP ${feRole} "${feHost}:${feEditLogPort}"; """ // Wait for the frontend to be fully dropped maxWaitSeconds = 300 waited = 0 while (waited < maxWaitSeconds) { + reconnectFe() def currentFrontends = sql_return_maparray("SHOW FRONTENDS") if (currentFrontends.size() == frontends.size() - 1) { logger.info("Non-master frontend successfully dropped") @@ -286,6 +340,59 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { logger.info("Frontend successfully added back and cluster status verified") + // CASE 6. 
Drop frontend and add back again + logger.info("Dropping frontend and adding back again") + + // Get the frontend to be dropped + def frontendToDrop = frontends.find { it['Host'] == feHost && it['EditLogPort'] == feEditLogPort } + assert frontendToDrop != null, "Could not find the frontend to drop" + + // Drop the frontend + sql """ ALTER SYSTEM DROP FOLLOWER "${feHost}:${feEditLogPort}"; """ + sleep(30000) + reconnectFe() + + // Wait for the frontend to be fully dropped + maxWaitSeconds = 300 + waited = 0 + while (waited < maxWaitSeconds) { + def updatedFrontends = sql_return_maparray("SHOW FRONTENDS") + if (!updatedFrontends.any { it['Host'] == feHost && it['EditLogPort'] == feEditLogPort }) { + logger.info("Frontend successfully dropped") + break + } + sleep(10000) + waited += 10 + } + + if (waited >= maxWaitSeconds) { + throw new Exception("Timeout waiting for frontend to be dropped") + } + + // Add the frontend back + sql """ ALTER SYSTEM ADD FOLLOWER "${feHost}:${feEditLogPort}"; """ + + // Wait for the frontend to be fully added back + maxWaitSeconds = 300 + waited = 0 + while (waited < maxWaitSeconds) { + def updatedFrontends = sql_return_maparray("SHOW FRONTENDS") + if (updatedFrontends.any { it['Host'] == feHost && it['EditLogPort'] == feEditLogPort }) { + logger.info("Frontend successfully added back") + break + } + sleep(10000) + waited += 10 + } + + if (waited >= maxWaitSeconds) { + throw new Exception("Timeout waiting for frontend to be added back") + } + + // Verify cluster status after adding the frontend back + checkClusterStatus(3, 3, 6) + + logger.info("Frontend successfully added back and cluster status verified") // CASE 6. If fe can not drop itself. // 6. Attempt to drop the master FE and expect an exception logger.info("Attempting to drop the master frontend") @@ -333,7 +440,7 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { def originalBackendCount = 3 // As per the initial setup in this test assert currentBackends.size() == originalBackendCount, "Number of backends should remain unchanged after attempting to drop a non-existent backend" - checkClusterStatus(3, 3, 6) + checkClusterStatus(3, 3, 7) // CASE 8. 
Decommission a backend and verify the process logger.info("Attempting to decommission a backend") @@ -381,7 +488,7 @@ suite('test_sql_mode_node_mgr', 'docker,p1') { logger.info("Successfully decommissioned backend and verified its status") - checkClusterStatus(3, 3, 7) + checkClusterStatus(3, 3, 8) } } diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy index bedc0f8ee1bc45..b65557b059c800 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_ck.groovy @@ -18,10 +18,6 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_compaction_uniq_keys_ck") { - if (isCloudMode()) { - logger.info("cloud does not support mow cluster key") - return - } def tableName = "compaction_uniq_keys_ck" try { diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy index bf4e8dc1a51004..73f2f069ca9319 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store_ck.groovy @@ -19,10 +19,6 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_compaction_uniq_keys_row_store_ck", "p0") { - if (isCloudMode()) { - logger.info("cloud does not support mow cluster key") - return - } def realDb = "regression_test_serving_p0" def tableName = realDb + ".test_compaction_uniq_keys_row_store_ck" sql "CREATE DATABASE IF NOT EXISTS ${realDb}" diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy index a21d73b7f26e6e..21af1a9220788c 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete_ck.groovy @@ -18,10 +18,6 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_compaction_uniq_keys_with_delete_ck") { - if (isCloudMode()) { - logger.info("cloud does not support mow cluster key") - return - } def tableName = "test_compaction_uniq_keys_with_delete_ck" try { diff --git a/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy b/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy new file mode 100644 index 00000000000000..2219cc175b534b --- /dev/null +++ b/regression-test/suites/compaction/test_cu_compaction_remove_old_version_delete_bitmap.groovy @@ -0,0 +1,316 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
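+// This suite covers cumulative compaction on a cloud-mode merge-on-write table: it checks
+// via /api/delete_bitmap/count that old-version delete bitmaps are removed after compaction,
+// and that the count stays unchanged when the update_delete_bitmap_failed debug point makes
+// the bitmap update fail.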
+ +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_cu_compaction_remove_old_version_delete_bitmap", "nonConcurrent") { + if (!isCloudMode()) { + return + } + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + def backendId_to_params = [string: [:]] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + def set_be_param = { paramName, paramValue -> + // for each be node, set paramName=paramValue + for (String id in backendId_to_backendIP.keySet()) { + def beIp = backendId_to_backendIP.get(id) + def bePort = backendId_to_backendHttpPort.get(id) + def (code, out, err) = curl("POST", String.format("http://%s:%s/api/update_config?%s=%s", beIp, bePort, paramName, paramValue)) + assertTrue(out.contains("OK")) + } + } + + def reset_be_param = { paramName -> + // for each be node, reset paramName to default + for (String id in backendId_to_backendIP.keySet()) { + def beIp = backendId_to_backendIP.get(id) + def bePort = backendId_to_backendHttpPort.get(id) + def original_value = backendId_to_params.get(id).get(paramName) + def (code, out, err) = curl("POST", String.format("http://%s:%s/api/update_config?%s=%s", beIp, bePort, paramName, original_value)) + assertTrue(out.contains("OK")) + } + } + + def get_be_param = { paramName -> + // for each be node, get the current param value + def paramValue = "" + for (String id in backendId_to_backendIP.keySet()) { + def beIp = backendId_to_backendIP.get(id) + def bePort = backendId_to_backendHttpPort.get(id) + // get the config value from be + def (code, out, err) = curl("GET", String.format("http://%s:%s/api/show_config?conf_item=%s", beIp, bePort, paramName)) + assertTrue(code == 0) + assertTrue(out.contains(paramName)) + // parsing + def resultList = parseJson(out)[0] + assertTrue(resultList.size() == 4) + // get original value + paramValue = resultList[2] + backendId_to_params.get(id, [:]).put(paramName, paramValue) + } + } + + def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> + if (compact_type == "cumulative") { + def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) + logger.info("Run compaction: code=" + code_1 + ", out=" + out_1 + ", err=" + err_1) + assertEquals(code_1, 0) + return out_1 + } else if (compact_type == "full") { + def (code_2, out_2, err_2) = be_run_full_compaction(be_host, be_http_port, tablet_id) + logger.info("Run compaction: code=" + code_2 + ", out=" + out_2 + ", err=" + err_2) + assertEquals(code_2, 0) + return out_2 + } else { + assertFalse(true) + } + } + + def getTabletStatus = { be_host, be_http_port, tablet_id -> + boolean running = true + Thread.sleep(1000) + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET http://${be_host}:${be_http_port}") + sb.append("/api/compaction/show?tablet_id=") + sb.append(tablet_id) + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + out = process.getText() + logger.info("Get tablet status: code=" + code + ", out=" + out) + assertEquals(code, 0) + def tabletStatus = parseJson(out.trim()) + return tabletStatus + } + + def waitForCompaction = { be_host, be_http_port, tablet_id -> + boolean running = true + do { + Thread.sleep(1000) + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET http://${be_host}:${be_http_port}") + sb.append("/api/compaction/run_status?tablet_id=") + sb.append(tablet_id) + + String command = sb.toString() + logger.info(command) + process =
command.execute() + code = process.waitFor() + out = process.getText() + logger.info("Get compaction status: code=" + code + ", out=" + out) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + + def getDeleteBitmapStatus = { be_host, be_http_port, tablet_id -> + boolean running = true + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET http://${be_host}:${be_http_port}") + sb.append("/api/delete_bitmap/count?tablet_id=") + sb.append(tablet_id) + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + out = process.getText() + logger.info("Get delete bitmap count status: =" + code + ", out=" + out) + assertEquals(code, 0) + def deleteBitmapStatus = parseJson(out.trim()) + return deleteBitmapStatus + } + + def testTable = "test_cu_compaction_remove_old_version_delete_bitmap" + def timeout = 10000 + sql """ DROP TABLE IF EXISTS ${testTable}""" + def testTableDDL = """ + create table ${testTable} + ( + `plan_id` bigint(20) NOT NULL, + `target_id` int(20) NOT NULL, + `target_name` varchar(255) NOT NULL + ) + ENGINE=OLAP + UNIQUE KEY(`plan_id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`plan_id`) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "replication_allocation" = "tag.location.default: 1", + "disable_auto_compaction" = "true" + ); + """ + sql testTableDDL + sql "sync" + + // store the original value + get_be_param("compaction_promotion_version_count") + get_be_param("tablet_rowset_stale_sweep_time_sec") + set_be_param("compaction_promotion_version_count", "5") + set_be_param("tablet_rowset_stale_sweep_time_sec", "0") + + try { + GetDebugPoint().enableDebugPointForAllBEs("CloudCumulativeCompaction.modify_rowsets.delete_expired_stale_rowsets") + // 1. 
test normal + sql "sync" + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,1,'1'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,2,'2'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,3,'3'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,4,'4'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,5,'5'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,6,'6'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,7,'7'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,8,'8'); """ + + qt_sql "select * from ${testTable} order by plan_id" + + // trigger compaction to generate base rowset + def tablets = sql_return_maparray """ show tablets from ${testTable}; """ + logger.info("tablets: " + tablets) + def delete_bitmap_count = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + String trigger_backend_id = tablet.BackendId + getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + + // before compaction, delete_bitmap_count is (rowsets num - 1) + delete_bitmap_count = getDeleteBitmapStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id).delete_bitmap_count + assertTrue(delete_bitmap_count == 7) + logger.info("delete_bitmap_count:" + delete_bitmap_count) + + assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) + getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + } + + qt_sql "select * from ${testTable} order by plan_id" + + def now = System.currentTimeMillis() + + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,9,'9'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,10,'10'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,11,'11'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,12,'12'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,13,'13'); """ + + def time_diff = System.currentTimeMillis() - now + logger.info("time_diff:" + time_diff) + assertTrue(time_diff <= timeout, "wait_for_insert_into_values timeout") + + qt_sql "select * from ${testTable} order by plan_id" + + // trigger cu compaction to remove old version delete bitmap + + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + + // before compaction, delete_bitmap_count is (rowsets num - 1) + String trigger_backend_id = tablet.BackendId + delete_bitmap_count = getDeleteBitmapStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id).delete_bitmap_count + logger.info("delete_bitmap_count:" + delete_bitmap_count) + assertTrue(delete_bitmap_count == 12) + + getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], + "cumulative", tablet_id).contains("Success")); + 
waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) + getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + + Thread.sleep(1000) + // after compaction, delete_bitmap_count is 1 + delete_bitmap_count = getDeleteBitmapStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id).delete_bitmap_count + logger.info("delete_bitmap_count:" + delete_bitmap_count) + assertTrue(delete_bitmap_count == 1) + } + + qt_sql "select * from ${testTable} order by plan_id" + + // 2. test update delete bitmap failed + + now = System.currentTimeMillis() + + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,19,'19'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,20,'20'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,21,'21'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,22,'22'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,23,'23'); """ + + time_diff = System.currentTimeMillis() - now + logger.info("time_diff:" + time_diff) + assertTrue(time_diff <= timeout, "wait_for_insert_into_values timeout") + + qt_sql "select * from ${testTable} order by plan_id" + GetDebugPoint().enableDebugPointForAllBEs("CloudCumulativeCompaction.modify_rowsets.update_delete_bitmap_failed") + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + String trigger_backend_id = tablet.BackendId + + delete_bitmap_count = getDeleteBitmapStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id).delete_bitmap_count + assertTrue(delete_bitmap_count == 6) + logger.info("delete_bitmap_count:" + delete_bitmap_count) + + getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + assertTrue(triggerCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) + getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + + // update fail, delete_bitmap_count will not change + Thread.sleep(1000) + delete_bitmap_count = getDeleteBitmapStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id).delete_bitmap_count + assertTrue(delete_bitmap_count == 6) + logger.info("delete_bitmap_count:" + delete_bitmap_count) + } + + qt_sql "select * from ${testTable} order by plan_id" + + now = System.currentTimeMillis() + + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,24,'24'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,25,'25'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,26,'26'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,27,'27'); """ + sql """ INSERT INTO ${testTable} VALUES (0,0,'0'),(1,28,'28'); """ + + time_diff = System.currentTimeMillis() - now + logger.info("time_diff:" + time_diff) + assertTrue(time_diff <= timeout, "wait_for_insert_into_values timeout") + + qt_sql "select * from ${testTable} order by plan_id" + 
GetDebugPoint().disableDebugPointForAllBEs("CloudCumulativeCompaction.modify_rowsets.update_delete_bitmap_failed") + } finally { + reset_be_param("compaction_promotion_version_count") + reset_be_param("tablet_rowset_stale_sweep_time_sec") + GetDebugPoint().disableDebugPointForAllBEs("CloudCumulativeCompaction.modify_rowsets.delete_expired_stale_rowsets") + GetDebugPoint().disableDebugPointForAllBEs("CloudCumulativeCompaction.modify_rowsets.update_delete_bitmap_failed") + } + +} \ No newline at end of file diff --git a/regression-test/suites/compaction/test_full_compaction.groovy b/regression-test/suites/compaction/test_full_compaction.groovy index 217a4da707cefb..60f52f6f5a55a0 100644 --- a/regression-test/suites/compaction/test_full_compaction.groovy +++ b/regression-test/suites/compaction/test_full_compaction.groovy @@ -19,10 +19,6 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_full_compaction") { def tableName = "test_full_compaction" - def isCloudMode = { - def ret = sql_return_maparray """show backends""" - ret.Tag[0].contains("cloud_cluster_name") - } try { String backend_id; @@ -171,7 +167,7 @@ suite("test_full_compaction") { assert tabletJson.rowsets instanceof List rowsetCount +=((List) tabletJson.rowsets).size() } - def cloudMode = isCloudMode.call() + def cloudMode = isCloudMode() if (cloudMode) { assert (rowsetCount == 2) } else { diff --git a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy index 22c8dc9f84d263..66f9274d9d499e 100644 --- a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy +++ b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys_ck.groovy @@ -18,10 +18,6 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_vertical_compaction_uniq_keys_ck") { - if (isCloudMode()) { - logger.info("cloud does not support mow cluster key") - return - } def tableName = "test_vertical_compaction_uniq_keys_ck" try { diff --git a/regression-test/suites/correctness/test_column_nullable_cache.groovy b/regression-test/suites/correctness/test_column_nullable_cache.groovy new file mode 100644 index 00000000000000..cf871c617aad42 --- /dev/null +++ b/regression-test/suites/correctness/test_column_nullable_cache.groovy @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
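+// The queries below filter on "col_int_undef_signed3 IS NULL and col_int_undef_signed3 = col_int_undef_signed3"; under SQL three-valued logic that predicate is never true, so the row query should return nothing and the count should be 0.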
+ +suite("test_column_nullable_cache") { + sql """ + drop table if exists test_column_nullable_cache; + """ + sql """ + CREATE TABLE `test_column_nullable_cache` ( + `col_int_undef_signed2` int NULL, + `col_int_undef_signed` int NULL, + `col_int_undef_signed3` int NULL, + `col_int_undef_signed4` int NULL, + `pk` int NULL + ) ENGINE=OLAP + DUPLICATE KEY(`col_int_undef_signed2`) + DISTRIBUTED by RANDOM BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + + sql """ + insert into test_column_nullable_cache + (pk,col_int_undef_signed,col_int_undef_signed2,col_int_undef_signed3,col_int_undef_signed4) + values (0,3,7164641,5,8),(1,null,3916062,5,6),(2,1,5533498,0,9),(3,7,2,null,7057679),(4,1,0,7,7), + (5,null,4,2448564,1),(6,7531976,7324373,9,7),(7,3,1,1,3),(8,6,8131576,9,-1793807),(9,9,2,4214547,9), + (10,-7299852,5,1,3),(11,7,3,-1036551,5),(12,-6108579,84823,4,1229534),(13,-1065629,5,4,null),(14,null,8072633,3328285,2), + (15,2,7,6,6),(16,8,5,-4582103,1),(17,5,-4677722,-2379367,4),(18,-7807532,-6686732,0,5329341), + (19,8,7,-4013246,-7013374),(20,0,2,9,2),(21,7,2383333,5,4),(22,5844611,2,2,0),(23,0,4756185,0,-5612039), + (24,6,4878754,608172,0),(25,null,7858692,7,-6704206),(26,7,-1697597,6,9),(27,9,-7021349,3,-3094786), + (28,2,2830915,null,8),(29,4133633,489212,5,9),(30,6,-3346211,3668768,2),(31,1,4862070,-5066405,0),(32,9,6,7,8), + (33,2,null,4,2),(34,1,2893430,-3282825,5),(35,2,3,4,2),(36,4,-3418732,6,1263819),(37,5,4,-6342170,6),(99,9,2,8,null); + """ + + qt_test1 """ + select * from test_column_nullable_cache where col_int_undef_signed3 IS NULL and col_int_undef_signed3 = col_int_undef_signed3; + """ + + qt_test2 """ + select count(*) from test_column_nullable_cache where col_int_undef_signed3 IS NULL and col_int_undef_signed3 = col_int_undef_signed3; + """ +} diff --git a/regression-test/suites/correctness/test_scan_keys_with_bool_type.groovy b/regression-test/suites/correctness/test_scan_keys_with_bool_type.groovy new file mode 100644 index 00000000000000..1eaa605823616b --- /dev/null +++ b/regression-test/suites/correctness/test_scan_keys_with_bool_type.groovy @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_scan_keys_with_bool_type") { + sql """ DROP TABLE IF EXISTS test_scan_keys_with_bool_type """ + + sql """ + CREATE TABLE `test_scan_keys_with_bool_type` ( + `col1` tinyint NOT NULL, + `col2` boolean NOT NULL, + `col3` tinyint NOT NULL, + `col5` boolean REPLACE NOT NULL, + `col4` datetime(2) REPLACE NOT NULL, + `col6` double REPLACE_IF_NOT_NULL NULL, + `col7` datetime(3) REPLACE_IF_NOT_NULL NULL + ) ENGINE=OLAP + AGGREGATE KEY(`col1`, `col2`, `col3`) + DISTRIBUTED BY HASH(`col1`, `col2`, `col3`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "disable_auto_compaction" = "true" + ); + """ + + sql """ insert into test_scan_keys_with_bool_type values + ( -100 , 0 , -82 , 0 , '2023-11-11 10:49:43.00' , 840968969.872149 , NULL ), + ( -100 , 1 , -82 , 1 , '2024-02-16 04:37:37.00' , -1299962421.904282 , NULL ), + ( -100 , 1 , 92 , 1 , '2024-02-16 04:37:37.00' , 23423423.0324234 , NULL ); + """ + + qt_select1 " select * from test_scan_keys_with_bool_type order by 1, 2, 3, 4, 5, 6, 7; " + qt_select2 " select * from test_scan_keys_with_bool_type where col1 <= -100 and col2 in (true, false) and col3 = -82 order by 1, 2, 3, 4, 5, 6, 7; " + qt_select3 " select * from test_scan_keys_with_bool_type where col1 <= -100 and col3 = -82 order by 1, 2, 3, 4, 5, 6, 7; " + sql """ DROP TABLE IF EXISTS test_scan_keys_with_bool_type2 """ + + sql """ + CREATE TABLE `test_scan_keys_with_bool_type2` ( + `col1` tinyint NOT NULL, + `col2` int NOT NULL, + `col3` tinyint NOT NULL, + `col5` boolean REPLACE NOT NULL, + `col4` datetime(2) REPLACE NOT NULL, + `col6` double REPLACE_IF_NOT_NULL NULL, + `col7` datetime(3) REPLACE_IF_NOT_NULL NULL + ) ENGINE=OLAP + AGGREGATE KEY(`col1`, `col2`, `col3`) + DISTRIBUTED BY HASH(`col1`, `col2`, `col3`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "disable_auto_compaction" = "true" + ); + """ + + sql """ insert into test_scan_keys_with_bool_type2 values + ( -100 , 0 , -82 , 0 , '2023-11-11 10:49:43.00' , 840968969.872149 , NULL ), + ( -100 , 1 , -82 , 1 , '2024-02-16 04:37:37.00' , -1299962421.904282 , NULL ), + ( -100 , 2 , -82 , 1 , '2024-02-16 04:37:37.00' , -1299962421.904282 , NULL ), + ( -100 , 1 , 92 , 1 , '2024-02-16 04:37:37.00' , 23423423.0324234 , NULL ); + """ + + qt_select3 " select * from test_scan_keys_with_bool_type2 order by 1, 2, 3, 4, 5, 6, 7; " + qt_select4 " select * from test_scan_keys_with_bool_type2 where col1 <= -100 and col2 in (1, 2) and col3 = -82 order by 1, 2, 3, 4, 5, 6, 7; " + qt_select5 " select * from test_scan_keys_with_bool_type2 where col1 <= -100 and col3 = -82 order by 1, 2, 3, 4, 5, 6, 7; " + + + sql """ DROP TABLE IF EXISTS test_scan_keys_with_bool_type3 """ + + sql """ + CREATE TABLE `test_scan_keys_with_bool_type3` ( + `col1` tinyint NOT NULL, + `col2` char NOT NULL, + `col3` tinyint NOT NULL, + `col5` boolean REPLACE NOT NULL, + `col4` datetime(2) REPLACE NOT NULL, + `col6` double REPLACE_IF_NOT_NULL NULL, + `col7` datetime(3) REPLACE_IF_NOT_NULL NULL + ) ENGINE=OLAP + AGGREGATE KEY(`col1`, `col2`, `col3`) + DISTRIBUTED BY HASH(`col1`, `col2`, `col3`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "disable_auto_compaction" = "true" + ); + """ + + sql """ insert into test_scan_keys_with_bool_type3 values + ( -100 , 'a' , -82 , 0 , '2023-11-11 10:49:43.00' , 840968969.872149 , NULL ), + ( -100 , 'b', -82 , 1 , '2024-02-16 04:37:37.00' , -1299962421.904282 , NULL ), + ( -100 , 'b' , 92 , 1 , '2024-02-16 04:37:37.00' , 
23423423.0324234 , NULL ), + ( -100 , 'c' , 92 , 1 , '2024-02-16 04:37:37.00' , 23423423.0324234 , NULL ); + """ + + qt_select6 " select * from test_scan_keys_with_bool_type3 order by 1, 2, 3, 4, 5, 6, 7; " + qt_select7 " select * from test_scan_keys_with_bool_type3 where col1 <= -100 and col2 in ('a', 'b') and col3 = -82 order by 1, 2, 3, 4, 5, 6, 7; " + qt_select8 " select * from test_scan_keys_with_bool_type3 where col1 <= -100 and col3 = -82 order by 1, 2, 3, 4, 5, 6, 7; " +} diff --git a/regression-test/suites/correctness_p0/test_always_nullable_window_function_legacy_planner.groovy b/regression-test/suites/correctness_p0/test_always_nullable_window_function_legacy_planner.groovy deleted file mode 100644 index 3c8194d05e0fd1..00000000000000 --- a/regression-test/suites/correctness_p0/test_always_nullable_window_function_legacy_planner.groovy +++ /dev/null @@ -1,174 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_always_nullable_window_function_legacy_planner") { - sql """ set enable_nereids_planner = false; """ - - def tableName = "test_always_nullable_window_function_table_legacy_planner" - def nullableTableName = "test_always_nullable_window_function_table_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT not null - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col,state) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ DROP TABLE IF EXISTS ${nullableTableName} """ - sql """ - CREATE TABLE IF NOT EXISTS ${nullableTableName} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col,state) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName} VALUES - (21,"04-21-11",1), - (22,"04-22-10-21",0), - (22,"04-22-10-21",1), - (23,"04-23-10",1), - (24,"02-24-10-21",1); """ - - sql """ INSERT INTO ${nullableTableName} VALUES - (21,"04-21-11",1), - (22,"04-22-10-21",0), - (22,"04-22-10-21",1), - (23,"04-23-10",1), - (24,"02-24-10-21",1); """ - - qt_select_default """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 
1 preceding AND 1 following) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) min_value, - lag(state, 10, null) over (partition by myday order by time_col) lag_value, - lead(state, 10, null) over (partition by myday order by time_col) lead_value - from ${tableName} order by myday, time_col, state; - """ - qt_select_empty_window """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) min_value, - lag(state, 2, null) over (partition by myday order by time_col) lag_value, - lead(state, 2, null) over (partition by myday order by time_col) lead_value - from ${tableName} order by myday, time_col, state; - """ - - qt_select_default_nullable """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) min_value, - lag(state, 10, null) over (partition by myday order by time_col) lag_value, - lead(state, 10, null) over (partition by myday order by time_col) lead_value - from ${nullableTableName} order by myday, time_col, state; - """ - qt_select_empty_window_nullable """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) min_value, - lag(state, 2, null) over (partition by myday order by time_col) lag_value, - lead(state, 2, null) over (partition by myday order by time_col) lead_value - from ${nullableTableName} order by myday, time_col, state; - """ - - sql "set enable_nereids_planner = 0" - - qt_select_default_old_planner """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 
following) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) min_value, - lag(state, 1, null) over (partition by myday order by time_col) lag_value, - lead(state, 1, null) over (partition by myday order by time_col) lead_value - from ${tableName} order by myday, time_col, state; - """ - qt_select_empty_window_old_planner """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) min_value, - lag(state, 2, null) over (partition by myday order by time_col) lag_value, - lead(state, 2, null) over (partition by myday order by time_col) lead_value - from ${tableName} order by myday, time_col, state; - """ - - qt_select_default_nullable_old_planner """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 following) min_value, - lag(state, 1, null) over (partition by myday order by time_col) lag_value, - lead(state, 1, null) over (partition by myday order by time_col) lead_value - from ${nullableTableName} order by myday, time_col, state; - """ - qt_select_empty_window_nullable_old_planner """ - select *, - first_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) f_value, - last_value(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) l_value, - sum(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) sum_value, - avg(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) avg_value, - max(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) max_value, - min(state) over(partition by myday order by time_col rows BETWEEN 1 preceding AND 1 preceding) min_value, - lag(state, 2, null) over (partition by myday order by time_col) lag_value, - lead(state, 2, null) over (partition by myday order by time_col) lead_value - from ${nullableTableName} order by myday, time_col, state; - """ - -} diff --git 
a/regression-test/suites/correctness_p0/test_cast_decimal.groovy b/regression-test/suites/correctness_p0/test_cast_decimal.groovy index 17575fa0aa16ad..88f127606ed50a 100644 --- a/regression-test/suites/correctness_p0/test_cast_decimal.groovy +++ b/regression-test/suites/correctness_p0/test_cast_decimal.groovy @@ -23,7 +23,6 @@ suite("test_cast_decimal") { sql """drop table if exists test_ttt""" sql """create table test_ttt(big_key bigint)DISTRIBUTED BY HASH(big_key) BUCKETS 1 PROPERTIES ("replication_num" = "1");""" - sql """set enable_nereids_planner=false;""" sql """set enable_fold_constant_by_be = false; """ sql """SELECT 1 FROM test_ttt e1 diff --git a/regression-test/suites/correctness_p0/test_colocate_join.groovy b/regression-test/suites/correctness_p0/test_colocate_join.groovy deleted file mode 100644 index 44e401235b063f..00000000000000 --- a/regression-test/suites/correctness_p0/test_colocate_join.groovy +++ /dev/null @@ -1,311 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_colocate_join") { - - // this case check explain, so we disable nereids - sql """set enable_nereids_planner=false""" - - def db1 = "test_colocate_join_db1" - def db2 = "test_colocate_join_db2" - sql """ drop database if exists ${db1}""" - sql """ drop database if exists ${db2}""" - sql """ create database if not exists ${db1}""" - sql """ create database if not exists ${db2}""" - sql """ use ${db1}""" - - sql """ DROP TABLE IF EXISTS `test_colo1` """ - sql """ DROP TABLE IF EXISTS `test_colo2` """ - sql """ DROP TABLE IF EXISTS `test_colo3` """ - sql """ DROP TABLE IF EXISTS `test_colo4` """ - sql """ DROP TABLE IF EXISTS `test_colo5` """ - sql """ DROP TABLE IF EXISTS `test_global_tbl1` """ - sql """ DROP TABLE IF EXISTS `test_global_tbl2` """ - sql """ DROP TABLE IF EXISTS ${db2}.`test_global_tbl3` """ - - sql """ - CREATE TABLE `test_colo1` ( - `id` varchar(64) NULL, - `name` varchar(64) NULL, - `age` int NULL - ) ENGINE=OLAP - DUPLICATE KEY(`id`,`name`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`,`name`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "colocate_with" = "group", - "in_memory" = "false", - "storage_format" = "V2", - "disable_auto_compaction" = "false" - ); - """ - - sql """ - CREATE TABLE `test_colo2` ( - `id` varchar(64) NULL, - `name` varchar(64) NULL, - `age` int NULL - ) ENGINE=OLAP - DUPLICATE KEY(`id`,`name`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`,`name`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "colocate_with" = "group", - "in_memory" = "false", - "storage_format" = "V2", - "disable_auto_compaction" = "false" - ); - """ - - sql """ - CREATE TABLE `test_colo3` ( - `id` varchar(64) NULL, - `name` varchar(64) NULL, - `age` int NULL - ) 
ENGINE=OLAP - DUPLICATE KEY(`id`,`name`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`,`name`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "colocate_with" = "group", - "in_memory" = "false", - "storage_format" = "V2", - "disable_auto_compaction" = "false" - ); - """ - - sql """ - CREATE TABLE `test_colo4` ( - `id` varchar(64) NULL, - `name` varchar(64) NULL, - `age` int NULL - ) ENGINE=OLAP - DUPLICATE KEY(`id`,`name`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`,`name`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "colocate_with" = "group", - "in_memory" = "false", - "storage_format" = "V2", - "disable_auto_compaction" = "false" - ); - """ - - sql """ - CREATE TABLE `test_colo5` ( - `id` varchar(64) NULL, - `name` varchar(64) NULL, - `age` int NULL - ) ENGINE=OLAP - DUPLICATE KEY(`id`,`name`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`,`name`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "colocate_with" = "group", - "in_memory" = "false", - "storage_format" = "V2", - "disable_auto_compaction" = "false" - ); - """ - - sql """ - create table test_global_tbl1 ( - id int, - name varchar(100), - dt date - ) - distributed by hash(id, name) buckets 4 - properties("colocate_with" = "__global__group1", - "replication_num" = "1"); - """ - - sql """ - create table test_global_tbl2 ( - id int, - name varchar(20), - dt date, - age bigint - ) - distributed by hash(id, name) buckets 4 - properties("colocate_with" = "__global__group1", - "replication_num" = "1"); - """ - - sql """ - create table ${db2}.test_global_tbl3 ( - id int, - name varchar(50), - dt date, - age bigint - ) - partition by range(dt) ( - partition p1 values less than("2022-02-01"), - partition p2 values less than("2022-03-01"), - partition p3 values less than("2022-04-01") - ) - distributed by hash(id, name) buckets 4 - properties("colocate_with" = "__global__group1", - "replication_num" = "1"); - """ - - sql """insert into test_colo1 values('1','a',12);""" - sql """insert into test_colo2 values('1','a',12);""" - sql """insert into test_colo3 values('1','a',12);""" - sql """insert into test_colo4 values('1','a',12);""" - sql """insert into test_colo5 values('1','a',12);""" - - explain { - sql("select * from test_colo1 a inner join test_colo2 b on a.id = b.id and a.name = b.name inner join test_colo3 c on a.id=c.id and a.name= c.name inner join test_colo4 d on a.id=d.id and a.name= d.name inner join test_colo5 e on a.id=e.id and a.name= e.name;") - contains "8:VHASH JOIN\n | join op: INNER JOIN(COLOCATE[])[]" - contains "6:VHASH JOIN\n | join op: INNER JOIN(COLOCATE[])[]" - contains "4:VHASH JOIN\n | join op: INNER JOIN(COLOCATE[])[]" - contains "2:VHASH JOIN\n | join op: INNER JOIN(COLOCATE[])[]" - } - - /* test join same table but hit different rollup, should disable colocate join */ - sql """ DROP TABLE IF EXISTS `test_query_colocate`;""" - - sql """ - CREATE TABLE `test_query_colocate` ( - `datekey` int(11) NULL, - `rollup_1_condition` int null, - `rollup_2_condition` int null, - `sum_col1` bigint(20) SUM NULL, - `sum_col2` bigint(20) SUM NULL - ) ENGINE=OLAP - AGGREGATE KEY(`datekey`,`rollup_1_condition`,`rollup_2_condition`) - COMMENT "" - PARTITION BY RANGE(`datekey`) - (PARTITION p20220102 VALUES [("20220101"), ("20220102")), - PARTITION p20220103 VALUES [("20220102"), ("20220103"))) - DISTRIBUTED BY HASH(`datekey`) BUCKETS 1 - rollup ( - rollup_1(datekey, sum_col1), - rollup_2(datekey, sum_col2) - ) - PROPERTIES ( - 
"replication_allocation" = "tag.location.default: 1", - "in_memory" = "false", - "storage_format" = "V2" - ) - """ - - sql """insert into test_query_colocate values - (20220101, 102, 200, 200, 100), - (20220101, 101, 200, 200, 100), - (20220101, 102, 202, 200, 100), - (20220101, 101, 202, 200, 100);""" - - explain { - sql("select /*+SET_VAR(parallel_fragment_exec_instance_num=1,parallel_pipeline_task_num=1)*/ " + - " sum_col1,sum_col2 " + - "from " + - "(select datekey,sum(sum_col1) as sum_col1 from test_query_colocate where datekey=20220101 group by datekey) t1 " + - "left join " + - "(select datekey,sum(sum_col2) as sum_col2 from test_query_colocate where datekey=20220101 group by datekey) t2 " + - "on t1.datekey = t2.datekey") - contains "Tables are not in the same group" - } - - sql """ DROP TABLE IF EXISTS `test_query_colocate` """ - - /* test no rollup is selected */ - sql """ DROP TABLE IF EXISTS `tbl1`;""" - sql """ DROP TABLE IF EXISTS `tbl2`;""" - - sql """ - create table tbl1(k1 int, k2 varchar(32), v bigint sum) AGGREGATE KEY(k1,k2) distributed by hash(k1) buckets 1 properties('replication_num' = '1'); - """ - - sql """ - create table tbl2(k3 int, k4 varchar(32)) DUPLICATE KEY(k3) distributed by hash(k3) buckets 1 properties('replication_num' = '1'); - """ - - explain { - sql("select * from tbl1 join tbl2 on tbl1.k1 = tbl2.k3") - contains "INNER JOIN" - } - - sql """ DROP TABLE IF EXISTS `tbl1`;""" - sql """ DROP TABLE IF EXISTS `tbl2`;""" - - sql """insert into ${db1}.test_global_tbl1 values - (1,"jack", "2022-01-01"), - (2,"jack1", "2022-01-02"), - (3,"jack2", "2022-01-03"), - (4,"jack3", "2022-02-01"), - (5,"jack4", "2022-02-01"), - (6, null, "2022-03-01"); - """ - - sql """insert into ${db1}.test_global_tbl2 values - (1,"jack", "2022-01-01", 10), - (2,"jack1", "2022-01-02", 11), - (3,"jack2", "2022-01-03", 12), - (4,"jack3", "2022-02-01", 13), - (5,"jack4", "2022-02-01", 14), - (6,null, "2022-03-01", 15); - """ - - sql """insert into ${db2}.test_global_tbl3 values - (1,"jack", "2022-01-01", 10), - (2,"jack1", "2022-01-02", 11), - (3,"jack2", "2022-01-03", 12), - (4,"jack3", "2022-02-01", 13), - (5,"jack4", "2022-02-01", 14), - (6,null, "2022-03-01", 15); - """ - - order_qt_global1 """select * from ${db1}.test_global_tbl1 a join ${db1}.test_global_tbl2 b on a.id = b.id and a.name = b.name """ - order_qt_global2 """select * from ${db1}.test_global_tbl1 a join ${db2}.test_global_tbl3 b on a.id = b.id and a.name = b.name """ - - explain { - sql ("select * from ${db1}.test_global_tbl1 a join ${db1}.test_global_tbl2 b on a.id = b.id and a.name = b.name") - contains "COLOCATE" - } - explain { - sql ("select * from ${db1}.test_global_tbl1 a join ${db2}.test_global_tbl3 b on a.id = b.id and a.name = b.name") - contains "COLOCATE" - } - /* add partition */ - sql """alter table ${db2}.test_global_tbl3 add partition p4 values less than("2022-05-01")""" - sql """insert into ${db2}.test_global_tbl3 values (7, "jack7", "2022-04-01", 16)""" - order_qt_global3 """select * from ${db1}.test_global_tbl1 a join ${db2}.test_global_tbl3 b on a.id = b.id and a.name = b.name """ - explain { - sql ("select * from ${db1}.test_global_tbl1 a join ${db2}.test_global_tbl3 b on a.id = b.id and a.name = b.name") - contains "COLOCATE" - } - - /* modify group: unset */ - sql """alter table ${db2}.test_global_tbl3 set ("colocate_with" = "");""" - explain { - sql ("select * from ${db1}.test_global_tbl1 a join ${db2}.test_global_tbl3 b on a.id = b.id and a.name = b.name") - contains "Tables are not in the same 
group" - } - - /* modify group: from global to database level */ - sql """alter table ${db1}.test_global_tbl2 set ("colocate_with" = "db_level_group");""" - explain { - sql ("select * from ${db1}.test_global_tbl1 a join ${db1}.test_global_tbl2 b on a.id = b.id and a.name = b.name") - contains "Tables are not in the same group" - } -} diff --git a/regression-test/suites/correctness_p0/test_default_bitmap_empty.groovy b/regression-test/suites/correctness_p0/test_default_bitmap_empty.groovy new file mode 100644 index 00000000000000..60b6ff73cb37e8 --- /dev/null +++ b/regression-test/suites/correctness_p0/test_default_bitmap_empty.groovy @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_default_bitmap_empty") { + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + def tableName = "test_default_bitmap_empty" + + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + k TINYINT, + v1 bitmap NOT NULL DEFAULT bitmap_empty, + v2 INT + ) + UNIQUE KEY(K) + DISTRIBUTED BY HASH(k) + PROPERTIES("replication_num" = "1"); + """ + + // test insert into. + sql " insert into ${tableName} (k, v2) values (1, 1); " + sql " insert into ${tableName} (k, v2) values (2, 2); " + sql " insert into ${tableName} (k, v2) values (3, 3); " + sql " insert into ${tableName} (k, v2) values (4, 4); " + sql "sync" + qt_insert_into1 """ select bitmap_count(v1) from ${tableName}; """ + + // test csv stream load. 
+ streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'columns', 'k, v1=bitmap_empty(), v2' + + file 'test_default_bitmap_empty_streamload.csv' + + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_stream_load_csv1 """ select bitmap_count(v1) from ${tableName}; """ + + // test partial update + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + k TINYINT, + v1 bitmap NOT NULL DEFAULT bitmap_empty, + v2 INT + ) + UNIQUE KEY(K) + DISTRIBUTED BY HASH(k) + PROPERTIES("replication_num" = "1"); + """ + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + + sql " insert into ${tableName} (k, v2) values (1, 1); " + sql " insert into ${tableName} (k, v2) values (2, 2); " + sql " insert into ${tableName} (k, v2) values (3, 3); " + sql " insert into ${tableName} (k, v2) values (4, 4); " + sql "sync" + + qt_select_2 "select bitmap_count(v1) from ${tableName};" + + streamLoad { + table "${tableName}" + + set 'partial_columns', 'true' + set 'column_separator', ',' + set 'columns', 'k, v2' + + file 'test_default_bitmap_empty_streamload.csv' + + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_stream_load_csv2 """ select bitmap_count(v1) from ${tableName}; """ + + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + k TINYINT, + v1 bitmap BITMAP_UNION default BITMAP_EMPTY, + v2 INT replace_if_not_null + ) + aggregate KEY(K) + DISTRIBUTED BY HASH(k) + PROPERTIES("replication_num" = "1"); + """ + + // test insert into. + sql " insert into ${tableName} (k, v2) values (1, 1); " + sql " insert into ${tableName} (k, v2) values (2, 2); " + sql " insert into ${tableName} (k, v2) values (3, 3); " + sql " insert into ${tableName} (k, v2) values (4, 4); " + sql "sync" + qt_insert_into3 """ select bitmap_count(v1) from ${tableName}; """ + + // test csv stream load. + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'columns', 'k, v1=bitmap_empty(), v2' + + file 'test_default_bitmap_empty_streamload.csv' + + time 10000 // limit inflight 10s + } + + sql "sync" + + qt_stream_load_csv3 """ select bitmap_count(v1) from ${tableName}; """ +} \ No newline at end of file diff --git a/regression-test/suites/correctness_p0/test_first_value_window_legacy_planner.groovy b/regression-test/suites/correctness_p0/test_first_value_window_legacy_planner.groovy deleted file mode 100644 index 37d6e62c2f59a5..00000000000000 --- a/regression-test/suites/correctness_p0/test_first_value_window_legacy_planner.groovy +++ /dev/null @@ -1,161 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_first_value_window_legacy_planner") { - sql """ set enable_nereids_planner = false; """ - - def tableName = "test_first_value_window_state_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col,state) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName} VALUES - (21,"04-21-11",1), - (22,"04-22-10-21",0), - (22,"04-22-10-21",1), - (23,"04-23-10",1), - (24,"02-24-10-21",1); """ - - qt_select_default """ select *,first_value(state) over(partition by myday order by time_col range between current row and unbounded following) from ${tableName} order by myday, time_col, state; """ - - - def tableName1 = "test_first_value_window_array_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName1} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName1} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` ARRAY - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName1} VALUES - (21,"04-21-11",["amory", "clever"]), - (22,"04-22-10-21",["is ", "cute", "tea"]), - (22,"04-22-10-21",["doris", "aws", "greate"]), - (23,"04-23-10", ["p7", "year4"]), - (24,"02-24-10-21",[""]); """ - - qt_select_default """ select *,first_value(state) over(partition by myday order by time_col range between current row and unbounded following) from ${tableName1} order by myday, time_col; """ - - qt_select_always_nullable """ - select - *, - first_value(1) over(partition by myday order by time_col rows between 1 preceding and 1 preceding) first_value, - last_value(999) over(partition by myday order by time_col rows between 1 preceding and 1 preceding) last_value - from test_first_value_window_array_legacy_planner order by myday, time_col; - """ - - def tableName2 = "test_first_value_window_state_not_null_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName2} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName2} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT NOT NULL - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col,state) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName2} VALUES - (21,"04-21-11",1), - (22,"04-22-10-21",0), - (22,"04-22-10-21",1), - (23,"04-23-10",1), - (24,"02-24-10-21",1); """ - - qt_select_default2 """ - select * - ,first_value(state) over(partition by `myday` order by `time_col`) v1 - ,first_value(state, 0) over(partition by `myday` order by `time_col`) v2 - ,first_value(state, 1) over(partition by `myday` order by `time_col`) v3 - from ${tableName2} order by `myday`, `time_col`, `state`; - """ - - def tableName3 = "test_first_value_window_state_ignore_null_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName3} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName3} ( - `id` INT, - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT - ) ENGINE=OLAP - DUPLICATE KEY(`id`, `myday`) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`id`, `myday`) BUCKETS 4 - PROPERTIES ( - "replication_num" = "1", - 
"in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName3} VALUES - (1,21,"04-21-11",null), - (2,21,"04-21-12",2), - (3,21,"04-21-13",3), - (4,22,"04-22-10-21",null), - (5,22,"04-22-10-22",null), - (6,22,"04-22-10-23",5), - (7,22,"04-22-10-24",null), - (8,22,"04-22-10-25",9), - (9,23,"04-23-11",null), - (10,23,"04-23-12",10), - (11,23,"04-23-13",null), - (12,24,"02-24-10-21",null); """ - - qt_select_default3 """ - select * - ,first_value(`state`) over(partition by `myday` order by `time_col` rows between 1 preceding and 1 following) v1 - ,first_value(`state`, 0) over(partition by `myday` order by `time_col` rows between 1 preceding and 1 following) v2 - ,first_value(`state`, 1) over(partition by `myday` order by `time_col` rows between 1 preceding and 1 following) v3 - from ${tableName3} order by `id`, `myday`, `time_col`; - """ -} diff --git a/regression-test/suites/correctness_p0/test_last_value_window_legacy_planner.groovy b/regression-test/suites/correctness_p0/test_last_value_window_legacy_planner.groovy deleted file mode 100644 index de20441dcc92f9..00000000000000 --- a/regression-test/suites/correctness_p0/test_last_value_window_legacy_planner.groovy +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_last_value_window_legacy_planner") { - sql """ set enable_nereids_planner = false; """ - - def tableName = "test_last_value_window_state_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col,state) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName} VALUES - (21,"04-21-11",1), - (22,"04-22-10-21",0), - (22,"04-22-10-21",1), - (23,"04-23-10",1), - (24,"02-24-10-21",1); """ - - qt_select_default """ select *,last_value(state) over(partition by myday order by time_col) from ${tableName} order by myday, time_col, state; """ - - - def tableName1 = "test_last_value_window_array_legacy_planner" - - sql """ DROP TABLE IF EXISTS ${tableName1} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableName1} ( - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` ARRAY - ) ENGINE=OLAP - DUPLICATE KEY(`myday`,time_col) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`myday`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableName1} VALUES - (21,"04-21-11",["amory", "clever"]), - (22,"04-22-10-21",["is ", "cute", "tea"]), - (22,"04-22-10-21",["doris", "aws", "greate"]), - (23,"04-23-10", ["p7", "year4"]), - (24,"02-24-10-21",[""]); """ - - qt_select_default """ select *,last_value(state) over(partition by myday order by time_col range between current row and unbounded following) from ${tableName1} order by myday, time_col; """ - - def tableNameWithNull = "test_last_value_window_state_null_legacy_planner" - sql """ DROP TABLE IF EXISTS ${tableNameWithNull} """ - sql """ - CREATE TABLE IF NOT EXISTS ${tableNameWithNull} ( - `id` INT, - `myday` INT, - `time_col` VARCHAR(40) NOT NULL, - `state` INT - ) ENGINE=OLAP - DUPLICATE KEY(`id`,`myday`) - COMMENT "OLAP" - DISTRIBUTED BY HASH(`id`) BUCKETS 4 - PROPERTIES ( - "replication_num" = "1", - "in_memory" = "false", - "storage_format" = "V2" - ); - """ - - sql """ INSERT INTO ${tableNameWithNull} VALUES - (1,21,"04-21-11",1), - (2,21,"04-21-12",null), - (3,21,"04-21-13",null), - (4,22,"04-22-10",0), - (5,22,"04-22-11",8), - (6,22,"04-22-12",null), - (7,23,"04-23-13",null), - (8,23,"04-23-14",2), - (9,23,"04-23-15",null), - (10,23,"04-23-16",null), - (11,24,"02-24-10-22",null), - (12,24,"02-24-10-23",9), - (13,24,"02-24-10-24",null); """ - - qt_select_null """ select * - , last_value(state, false) over(partition by myday order by time_col rows between 1 preceding and 1 following) v1 - , last_value(state, true) over(partition by myday order by time_col rows between 1 preceding and 1 following) v2 - from ${tableNameWithNull} order by id, myday, time_col; - """ -} diff --git a/regression-test/suites/correctness_p0/test_push_conjuncts_inlineview.groovy b/regression-test/suites/correctness_p0/test_push_conjuncts_inlineview.groovy deleted file mode 100644 index 29663c702dff87..00000000000000 --- a/regression-test/suites/correctness_p0/test_push_conjuncts_inlineview.groovy +++ /dev/null @@ -1,189 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one - // or more contributor license agreements. See the NOTICE file - // distributed with this work for additional information - // regarding copyright ownership. 
The ASF licenses this file - // to you under the Apache License, Version 2.0 (the - // "License"); you may not use this file except in compliance - // with the License. You may obtain a copy of the License at - // - // http://www.apache.org/licenses/LICENSE-2.0 - // - // Unless required by applicable law or agreed to in writing, - // software distributed under the License is distributed on an - // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - // KIND, either express or implied. See the License for the - // specific language governing permissions and limitations - // under the License. - -suite("test_push_conjuncts_inlineview") { - sql """ set enable_nereids_planner=false""" - sql """ DROP TABLE IF EXISTS `push_conjunct_table` """ - sql """ - CREATE TABLE `push_conjunct_table` ( - `a_key` varchar(255) NULL , - `d_key` varchar(255) NULL , - `c_key` varchar(32) NULL , - `b_key` date NOT NULL - ) ENGINE=OLAP - UNIQUE KEY(`a_key`, `d_key`, `c_key`) - DISTRIBUTED BY HASH(`a_key`, `d_key`, `c_key`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "in_memory" = "false", - "storage_format" = "V2", - "disable_auto_compaction" = "false" - ); - """ - explain { - sql("""select - 1 - from - ( - select - rank() over( - partition by a_key - , c_key - , d_key - order by - b_key desc - ) as px - from - push_conjunct_table a - - union all - select 2 as px - from - push_conjunct_table a - )a - where - a.px = 1;""") - contains "5:VSELECT" - } - -explain { - sql("""SELECT * - FROM - (SELECT `a_key` AS `a_key` - FROM - (SELECT `b`.`a_key` AS `a_key` - FROM - (SELECT `a`.`a_key` AS `a_key` - FROM `push_conjunct_table` a) b - GROUP BY 1 ) t2 ) t1 - WHERE a_key = '123';""") - notContains "having" - contains "= '123'" - } - -explain { - sql("""SELECT * - FROM - (SELECT `a`.`a_key` AS `a_key`, - now() as d - FROM `push_conjunct_table` a) t1 - join - (SELECT `a`.`a_key` AS `a_key`, - b_key - FROM `push_conjunct_table` a) t2 - on t1. 
d = t2.b_key;""") - notContains "VNESTED LOOP JOIN" - } - -sql """ - WITH ttt AS - (SELECT c1, - c2, - c3, - c4, - c5, - c6, - c7 - FROM - (SELECT '10000003' c1, '0816ffk' c2, '1' c3, 1416.0800 c4, '0816ffk' c5, '2023-07-03 15:36:36' c6, 1 c7 ) a - WHERE c7 = 1 ) - SELECT dd.c1, - dd.d1 - FROM - (SELECT src.c1, - - CASE - WHEN IFNULL(src.c3,'') = '' - OR src.c3 = '3' THEN - '-1' - WHEN src.c4 = 0 THEN - '0' - WHEN src.c4 <= 200 THEN - '1' - WHEN src.c4 > 200 - AND src.c4 <= 500 THEN - '2' - WHEN src.c4 > 500 - AND src.c4 <= 1000 THEN - '3' - ELSE '4' - END AS d1 - FROM ttt src - WHERE src.c1 = '10000003' - GROUP BY src.c1, d1 ) dd - WHERE dd.d1 IN ('-1'); -""" - -explain { - sql("""SELECT max(b_key) - FROM - (SELECT a_key, - max(b_key) AS b_key - FROM - (SELECT a_key, - max(b_key) AS b_key - FROM push_conjunct_table - GROUP BY a_key - UNION all - SELECT a_key, - max(b_key) AS b_key - FROM push_conjunct_table - GROUP BY a_key) t2 - GROUP BY t2.a_key ) t - WHERE t.a_key = "abcd" - GROUP BY t.a_key;""") - notContains "having" - contains "= 'abcd'" - } - - sql """ DROP TABLE IF EXISTS `push_conjunct_table` """ - - sql """ DROP TABLE IF EXISTS `dwd_mf_wms_plate_table` """ - sql """ CREATE TABLE `dwd_mf_wms_plate_table` ( - `id` int(11) NOT NULL COMMENT '主键', - `length` float NOT NULL COMMENT '', - `created_time` datetime NULL COMMENT '创建时间' - ) ENGINE=OLAP - UNIQUE KEY(`id`) - COMMENT '' - DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - );""" - explain { - sql("""select created_time from( - select - ROW_NUMBER() over(order by id ) as row_num, - id, - length, - created_time - from( - select - id, - `length` , - created_time - from - dwd_mf_wms_plate_table - ) t - group by id,length,created_time - ) res - where res.created_time<'2022-02-18 09:30:13';""") - contains "VSELECT" - } - sql """ DROP TABLE IF EXISTS `dwd_mf_wms_plate_table` """ -} - diff --git a/regression-test/suites/correctness_p0/test_rand_filter.groovy b/regression-test/suites/correctness_p0/test_rand_filter.groovy index 40f5e1fe2096ad..726de7f13b260a 100644 --- a/regression-test/suites/correctness_p0/test_rand_filter.groovy +++ b/regression-test/suites/correctness_p0/test_rand_filter.groovy @@ -16,7 +16,6 @@ // under the License. 
suite("test_rand_filter") { - sql"""set enable_nereids_planner=false;""" sql """ DROP TABLE IF EXISTS test_rand_filter_t """ sql """ CREATE TABLE test_rand_filter_t ( diff --git a/regression-test/suites/correctness_p0/test_runtimefilter_with_window.groovy b/regression-test/suites/correctness_p0/test_runtimefilter_with_window.groovy index a843d615a63921..6ab4b0a1fe7bfa 100644 --- a/regression-test/suites/correctness_p0/test_runtimefilter_with_window.groovy +++ b/regression-test/suites/correctness_p0/test_runtimefilter_with_window.groovy @@ -86,42 +86,4 @@ sql "set disable_nereids_rules=PRUNE_EMPTY_PARTITION" on a.channel_param = b.param; """) contains "runtime filters" } - - sql """ set enable_nereids_planner=false""" - sql """ set disable_join_reorder=true""" - sql """ set enable_runtime_filter_prune=false""" - log.info("======origin planner1=================") - explain { - sql("""select a.phone - ,a.channel_param - ,a.createtime - ,rn - ,if(rn = 1,1,0) as liuzi_status - from ( - select a.phone,a.channel_param,a.createtime - ,row_number() over(partition by phone order by createtime asc) as rn - from test_runtimefilter_with_window_table2 a - ) a join ( - select param - from test_runtimefilter_with_window_table1 - ) b - on a.channel_param = b.param; """) - notContains "runtime filters" - } -log.info("======origin planner2=================") - explain { - sql("""select a.phone - ,a.channel_param - ,a.createtime - from ( - select a.phone,a.channel_param,a.createtime - from test_runtimefilter_with_window_table2 a - ) a join ( - select param - from test_runtimefilter_with_window_table1 - ) b - on a.channel_param = b.param; """) - contains "runtime filters" - } } - diff --git a/regression-test/suites/correctness_p0/test_set_operation.groovy b/regression-test/suites/correctness_p0/test_set_operation.groovy index 5ee6348a037685..0f80729571b8bb 100644 --- a/regression-test/suites/correctness_p0/test_set_operation.groovy +++ b/regression-test/suites/correctness_p0/test_set_operation.groovy @@ -114,15 +114,6 @@ suite("test_set_operation") { insert into test_B values("bb","bbbb"); """ - sql """ - set experimental_enable_nereids_planner = false; - """ - - qt_select1 """ SELECT DISTINCT * FROM((SELECT sku_code FROM test_B) INTERSECT (SELECT sku_code FROM test_B) UNION (SELECT sku_code FROM test_A)) as t order by 1; """ - - sql """ - set experimental_enable_nereids_planner = true; - """ qt_select1 """ SELECT DISTINCT * FROM((SELECT sku_code FROM test_B) INTERSECT (SELECT sku_code FROM test_B) UNION (SELECT sku_code FROM test_A)) as t order by 1; """ qt_select1 """ (select 0) intersect (select null); """ diff --git a/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_bitmap.groovy b/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_bitmap.groovy index b7f268c8253c0f..fef3a9b6c66678 100644 --- a/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_bitmap.groovy +++ b/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_bitmap.groovy @@ -17,50 +17,42 @@ suite("test_duplicate_table_bitmap") { - for (def use_nereids : [true, false]) { - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" + sql "sync;" - def tbName = "test_duplicate_bitmap1" - sql "DROP TABLE IF EXISTS ${tbName}" - sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( + def tbName = "test_duplicate_bitmap1" + sql "DROP TABLE IF EXISTS ${tbName}" + 
sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, id_bitmap bitmap ) DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - def result = sql "show create table ${tbName}" - logger.info("${result}") - assertTrue(result.toString().containsIgnoreCase('`id_bitmap` BITMAP NOT NULL')) + def result = sql "show create table ${tbName}" + logger.info("${result}") + assertTrue(result.toString().containsIgnoreCase('`id_bitmap` BITMAP NOT NULL')) - sql "insert into ${tbName} values(1,to_bitmap(1));" - sql "insert into ${tbName} values(2,bitmap_or(to_bitmap(3),to_bitmap(1000)));" - sql "insert into ${tbName} values(3,bitmap_or(to_bitmap(999),to_bitmap(1000),to_bitmap(888888)));" - qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k, bitmap_count(id_bitmap);" + sql "insert into ${tbName} values(1,to_bitmap(1));" + sql "insert into ${tbName} values(2,bitmap_or(to_bitmap(3),to_bitmap(1000)));" + sql "insert into ${tbName} values(3,bitmap_or(to_bitmap(999),to_bitmap(1000),to_bitmap(888888)));" + qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k, bitmap_count(id_bitmap);" - sql "insert into ${tbName} values(3,bitmap_from_string('1,0,1,2,3,1,5,99,876,2445'));" - sql "insert into ${tbName} values(1,bitmap_or(bitmap_from_string('90,5,876'),to_bitmap(1000)));" - qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k, bitmap_count(id_bitmap);" + sql "insert into ${tbName} values(3,bitmap_from_string('1,0,1,2,3,1,5,99,876,2445'));" + sql "insert into ${tbName} values(1,bitmap_or(bitmap_from_string('90,5,876'),to_bitmap(1000)));" + qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k, bitmap_count(id_bitmap);" - sql "insert into ${tbName} select * from ${tbName};" - qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k, bitmap_count(id_bitmap);" + sql "insert into ${tbName} select * from ${tbName};" + qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k, bitmap_count(id_bitmap);" - sql "DROP TABLE IF EXISTS ${tbName};" + sql "DROP TABLE IF EXISTS ${tbName};" - tbName = "test_duplicate_bitmap2" - sql "DROP TABLE IF EXISTS ${tbName}" - test { - sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( + tbName = "test_duplicate_bitmap2" + sql "DROP TABLE IF EXISTS ${tbName}" + test { + sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k bitmap, v int ) DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - exception "Key column can not set complex type:k" - } + exception "Key column can not set complex type:k" } } diff --git a/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_hll.groovy b/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_hll.groovy index 6321e5751af6bc..0c88f276a06707 100644 --- a/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_hll.groovy +++ b/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_hll.groovy @@ -17,63 +17,55 @@ suite("test_duplicate_table_hll") { - for (def use_nereids : [true, false]) { - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" + sql "sync;" - def tbName = "test_duplicate_hll1" - sql "DROP TABLE IF EXISTS ${tbName}" - sql """ CREATE TABLE IF NOT EXISTS 
${tbName} ( k int, v hll ) DUPLICATE KEY(k) + def tbName = "test_duplicate_hll1" + sql "DROP TABLE IF EXISTS ${tbName}" + sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v hll ) DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - def result = sql "show create table ${tbName}" - logger.info("${result}") - assertTrue(result.toString().containsIgnoreCase('`v` HLL NOT NULL')) + def result = sql "show create table ${tbName}" + logger.info("${result}") + assertTrue(result.toString().containsIgnoreCase('`v` HLL NOT NULL')) - def tbNameAgg = "test_duplicate_hll_agg1" - sql "DROP TABLE IF EXISTS ${tbNameAgg}" - sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v hll hll_union ) AGGREGATE KEY(k) + def tbNameAgg = "test_duplicate_hll_agg1" + sql "DROP TABLE IF EXISTS ${tbNameAgg}" + sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v hll hll_union ) AGGREGATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - sql """ insert into ${tbNameAgg} values + sql """ insert into ${tbNameAgg} values (1,hll_empty()),(2, hll_hash(100)), (2,hll_hash(0)),(2, hll_hash(4875)), (2,hll_hash(9234)),(2, hll_hash(45)), (2,hll_hash(0)),(2,hll_hash(100000)), (3,hll_hash(0)),(3,hll_hash(1)); """ - qt_sql "select k, hll_cardinality(v) from ${tbNameAgg} order by k;" - qt_sql "select HLL_UNION_AGG(v) from ${tbNameAgg};" + qt_sql "select k, hll_cardinality(v) from ${tbNameAgg} order by k;" + qt_sql "select HLL_UNION_AGG(v) from ${tbNameAgg};" - // 1. insert from aggregate table - sql "insert into ${tbName} select * from ${tbNameAgg};" - qt_from_agg "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" - qt_from_agg "select HLL_UNION_AGG(v) from ${tbName};" - // 2. insert into values - sql """ insert into ${tbName} values (4, hll_hash(100)), (1, hll_hash(999)), (2, hll_hash(0));""" - qt_from_values "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" - qt_from_values "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" - qt_from_values "select HLL_UNION_AGG(v) from ${tbName};" - // 3. insert from duplicate table - sql "insert into ${tbName} select * from ${tbName};" - qt_from_dup "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" - qt_from_dup "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" - qt_from_dup "select HLL_UNION_AGG(v) from ${tbName};" + // 1. insert from aggregate table + sql "insert into ${tbName} select * from ${tbNameAgg};" + qt_from_agg "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" + qt_from_agg "select HLL_UNION_AGG(v) from ${tbName};" + // 2. insert into values + sql """ insert into ${tbName} values (4, hll_hash(100)), (1, hll_hash(999)), (2, hll_hash(0));""" + qt_from_values "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" + qt_from_values "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" + qt_from_values "select HLL_UNION_AGG(v) from ${tbName};" + // 3. 
insert from duplicate table + sql "insert into ${tbName} select * from ${tbName};" + qt_from_dup "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" + qt_from_dup "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" + qt_from_dup "select HLL_UNION_AGG(v) from ${tbName};" - sql "DROP TABLE IF EXISTS ${tbName};" - sql "DROP TABLE IF EXISTS ${tbNameAgg};" + sql "DROP TABLE IF EXISTS ${tbName};" + sql "DROP TABLE IF EXISTS ${tbNameAgg};" - tbName = "test_duplicate_hll3" - sql "DROP TABLE IF EXISTS ${tbName}" - test { - sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k hll, v int) DUPLICATE KEY(k) + tbName = "test_duplicate_hll3" + sql "DROP TABLE IF EXISTS ${tbName}" + test { + sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k hll, v int) DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - exception "Key column can not set complex type:k" - } + exception "Key column can not set complex type:k" } } diff --git a/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_quantile_state.groovy b/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_quantile_state.groovy index 66b0ca28013d49..9c4e07094b6abc 100644 --- a/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_quantile_state.groovy +++ b/regression-test/suites/data_model_p0/duplicate/storage/test_duplicate_quantile_state.groovy @@ -17,59 +17,51 @@ suite("test_duplicate_table_quantile_state") { - for (def use_nereids : [true, false]) { - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" + sql "sync;" - def tbName = "test_duplicate_quantile_state1" - sql "DROP TABLE IF EXISTS ${tbName}" - sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v QUANTILE_STATE ) DUPLICATE KEY(k) + def tbName = "test_duplicate_quantile_state1" + sql "DROP TABLE IF EXISTS ${tbName}" + sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v QUANTILE_STATE ) DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - def result = sql "show create table ${tbName}" - logger.info("${result}") - assertTrue(result.toString().containsIgnoreCase('`v` QUANTILE_STATE NOT NULL')) + def result = sql "show create table ${tbName}" + logger.info("${result}") + assertTrue(result.toString().containsIgnoreCase('`v` QUANTILE_STATE NOT NULL')) - def tbNameAgg = "test_duplicate_quantile_state_agg1" - sql "DROP TABLE IF EXISTS ${tbNameAgg}" - sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v QUANTILE_STATE QUANTILE_UNION NOT NULL ) AGGREGATE KEY(k) + def tbNameAgg = "test_duplicate_quantile_state_agg1" + sql "DROP TABLE IF EXISTS ${tbNameAgg}" + sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v QUANTILE_STATE QUANTILE_UNION NOT NULL ) AGGREGATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - sql """ insert into ${tbNameAgg} values + sql """ insert into ${tbNameAgg} values (1,to_quantile_state(-1, 2048)),(2,to_quantile_state(0, 2048)), (2,to_quantile_state(1, 2048)),(3,to_quantile_state(0, 2048)), (3,to_quantile_state(1, 2048)),(3,to_quantile_state(2, 2048));""" - qt_sql "select k, quantile_percent(v, 0), quantile_percent(v, 0.5), quantile_percent(v, 1) from ${tbNameAgg} order by k;" + qt_sql "select k, quantile_percent(v, 0), quantile_percent(v, 0.5), quantile_percent(v, 1) from ${tbNameAgg} order by k;" 
- // 1. insert from aggregate table - sql "insert into ${tbName} select * from ${tbNameAgg};" - qt_from_agg "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" - // 2. insert into values - sql """ insert into ${tbName} values (1, to_quantile_state(-2, 2048)), (1, to_quantile_state(0, 2048)), (2, to_quantile_state(-100, 2048));""" - qt_from_values "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" - qt_from_values """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 + // 1. insert from aggregate table + sql "insert into ${tbName} select * from ${tbNameAgg};" + qt_from_agg "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" + // 2. insert into values + sql """ insert into ${tbName} values (1, to_quantile_state(-2, 2048)), (1, to_quantile_state(0, 2048)), (2, to_quantile_state(-100, 2048));""" + qt_from_values "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" + qt_from_values """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 from ${tbName} group by k order by k, c1, c2, c3; """ - // 3. insert from duplicate table - sql "insert into ${tbName} select * from ${tbName};" - qt_from_dup "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" - qt_from_dup """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 + // 3. 
insert from duplicate table + sql "insert into ${tbName} select * from ${tbName};" + qt_from_dup "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" + qt_from_dup """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 from ${tbName} group by k order by k, c1, c2, c3; """ - sql "DROP TABLE IF EXISTS ${tbName};" - sql "DROP TABLE IF EXISTS ${tbNameAgg};" + sql "DROP TABLE IF EXISTS ${tbName};" + sql "DROP TABLE IF EXISTS ${tbNameAgg};" - tbName = "test_duplicate_quantile_state3" - sql "DROP TABLE IF EXISTS ${tbName}" - test { - sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k QUANTILE_STATE, v int) DUPLICATE KEY(k) + tbName = "test_duplicate_quantile_state3" + sql "DROP TABLE IF EXISTS ${tbName}" + test { + sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k QUANTILE_STATE, v int) DUPLICATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - exception "Key column can not set complex type:k" - } + exception "Key column can not set complex type:k" } } diff --git a/regression-test/suites/data_model_p0/unique/test_unique_bitmap.groovy b/regression-test/suites/data_model_p0/unique/test_unique_bitmap.groovy index a538b8509eb969..5bc6cff17151ea 100644 --- a/regression-test/suites/data_model_p0/unique/test_unique_bitmap.groovy +++ b/regression-test/suites/data_model_p0/unique/test_unique_bitmap.groovy @@ -18,18 +18,11 @@ suite("test_unique_table_bitmap") { for (def enable_mow : [true, false]) { - for (def use_nereids : [true, false]) { - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" + sql "sync;" - def tbName = "test_uniq_table_bitmap" - sql "DROP TABLE IF EXISTS ${tbName}" - sql """ + def tbName = "test_uniq_table_bitmap" + sql "DROP TABLE IF EXISTS ${tbName}" + sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, id_bitmap bitmap @@ -37,20 +30,19 @@ suite("test_unique_table_bitmap") { DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "${enable_mow}"); """ - def result = sql "show create table ${tbName}" - logger.info("${result}") - assertTrue(result.toString().containsIgnoreCase('`id_bitmap` BITMAP NOT NULL')) + def result = sql "show create table ${tbName}" + logger.info("${result}") + assertTrue(result.toString().containsIgnoreCase('`id_bitmap` BITMAP NOT NULL')) - sql "insert into ${tbName} values(1,to_bitmap(1));" - sql "insert into ${tbName} values(2,bitmap_or(to_bitmap(3),to_bitmap(1000)));" - sql "insert into ${tbName} values(3,bitmap_or(to_bitmap(999),to_bitmap(1000),to_bitmap(888888)));" - qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k;" + sql "insert into ${tbName} values(1,to_bitmap(1));" + sql "insert into ${tbName} values(2,bitmap_or(to_bitmap(3),to_bitmap(1000)));" + sql "insert into ${tbName} values(3,bitmap_or(to_bitmap(999),to_bitmap(1000),to_bitmap(888888)));" + qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k;" - sql "insert into ${tbName} values(3,bitmap_from_string('1,0,1,2,3,1,5,99,876,2445'));" - sql "insert into ${tbName} values(1,bitmap_or(bitmap_from_string('90,5,876'),to_bitmap(1000)));" - qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k;" + sql 
"insert into ${tbName} values(3,bitmap_from_string('1,0,1,2,3,1,5,99,876,2445'));" + sql "insert into ${tbName} values(1,bitmap_or(bitmap_from_string('90,5,876'),to_bitmap(1000)));" + qt_sql "select k,bitmap_count(id_bitmap),bitmap_to_string(id_bitmap) from ${tbName} order by k;" - sql "DROP TABLE ${tbName};" - } + sql "DROP TABLE ${tbName};" } } diff --git a/regression-test/suites/data_model_p0/unique/test_unique_hll.groovy b/regression-test/suites/data_model_p0/unique/test_unique_hll.groovy index 7e80abd788a523..035f6b1cb378ed 100644 --- a/regression-test/suites/data_model_p0/unique/test_unique_hll.groovy +++ b/regression-test/suites/data_model_p0/unique/test_unique_hll.groovy @@ -18,64 +18,56 @@ suite("test_unique_table_hll") { for (def enable_mow : [true, false]) { - for (def use_nereids : [true, false]) { - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" + sql "sync;" - def tbName = "test_unique_hll1" - sql "DROP TABLE IF EXISTS ${tbName}" - sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v hll ) UNIQUE KEY(k) + def tbName = "test_unique_hll1" + sql "DROP TABLE IF EXISTS ${tbName}" + sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v hll ) UNIQUE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "${enable_mow}"); """ - def result = sql "show create table ${tbName}" - logger.info("${result}") - assertTrue(result.toString().containsIgnoreCase('`v` HLL NOT NULL')) + def result = sql "show create table ${tbName}" + logger.info("${result}") + assertTrue(result.toString().containsIgnoreCase('`v` HLL NOT NULL')) - def tbNameAgg = "test_unique_hll_agg1" - sql "DROP TABLE IF EXISTS ${tbNameAgg}" - sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v hll hll_union ) AGGREGATE KEY(k) + def tbNameAgg = "test_unique_hll_agg1" + sql "DROP TABLE IF EXISTS ${tbNameAgg}" + sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v hll hll_union ) AGGREGATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - sql """ insert into ${tbNameAgg} values + sql """ insert into ${tbNameAgg} values (1,hll_empty()),(2, hll_hash(100)), (2,hll_hash(0)),(2, hll_hash(4875)), (2,hll_hash(9234)),(2, hll_hash(45)), (2,hll_hash(0)),(2,hll_hash(100000)), (3,hll_hash(0)),(3,hll_hash(1)); """ - qt_sql "select k, hll_cardinality(v) from ${tbNameAgg} order by k;" - qt_sql "select HLL_UNION_AGG(v) from ${tbNameAgg};" + qt_sql "select k, hll_cardinality(v) from ${tbNameAgg} order by k;" + qt_sql "select HLL_UNION_AGG(v) from ${tbNameAgg};" - // 1. insert from aggregate table - sql "insert into ${tbName} select * from ${tbNameAgg};" - qt_from_agg "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" - qt_from_agg "select HLL_UNION_AGG(v) from ${tbName};" - // 2. insert into values - sql """ insert into ${tbName} values (4, hll_hash(100)), (1, hll_hash(999)), (2, hll_hash(0));""" - qt_from_values "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" - qt_from_values "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" - qt_from_values "select HLL_UNION_AGG(v) from ${tbName};" - // 3. 
insert from UNIQUE table - sql "insert into ${tbName} select * from ${tbName};" - qt_from_uniq "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" - qt_from_uniq "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" - qt_from_uniq "select HLL_UNION_AGG(v) from ${tbName};" + // 1. insert from aggregate table + sql "insert into ${tbName} select * from ${tbNameAgg};" + qt_from_agg "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" + qt_from_agg "select HLL_UNION_AGG(v) from ${tbName};" + // 2. insert into values + sql """ insert into ${tbName} values (4, hll_hash(100)), (1, hll_hash(999)), (2, hll_hash(0));""" + qt_from_values "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" + qt_from_values "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" + qt_from_values "select HLL_UNION_AGG(v) from ${tbName};" + // 3. insert from UNIQUE table + sql "insert into ${tbName} select * from ${tbName};" + qt_from_uniq "select k, hll_cardinality(v) from ${tbName} order by k, hll_cardinality(v);" + qt_from_uniq "select k, hll_cardinality(hll_union(v)) from ${tbName} group by k order by k, hll_cardinality(hll_union(v));" + qt_from_uniq "select HLL_UNION_AGG(v) from ${tbName};" - sql "DROP TABLE IF EXISTS ${tbName};" - sql "DROP TABLE IF EXISTS ${tbNameAgg};" + sql "DROP TABLE IF EXISTS ${tbName};" + sql "DROP TABLE IF EXISTS ${tbNameAgg};" - tbName = "test_unique_hll3" - sql "DROP TABLE IF EXISTS ${tbName}" - test { - sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k hll, v int) UNIQUE KEY(k) + tbName = "test_unique_hll3" + sql "DROP TABLE IF EXISTS ${tbName}" + test { + sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k hll, v int) UNIQUE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - exception "Key column can not set complex type:k" - } + exception "Key column can not set complex type:k" } } } diff --git a/regression-test/suites/data_model_p0/unique/test_unique_quantile_state.groovy b/regression-test/suites/data_model_p0/unique/test_unique_quantile_state.groovy index 024d4e11f97c6d..9f2b2a5475a097 100644 --- a/regression-test/suites/data_model_p0/unique/test_unique_quantile_state.groovy +++ b/regression-test/suites/data_model_p0/unique/test_unique_quantile_state.groovy @@ -18,60 +18,52 @@ suite("test_unique_table_quantile_state") { for (def enable_mow : [true, false]) { - for (def use_nereids : [true, false]) { - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" + sql "sync;" - def tbName = "test_unique_quantile_state1" - sql "DROP TABLE IF EXISTS ${tbName}" - sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v QUANTILE_STATE ) UNIQUE KEY(k) + def tbName = "test_unique_quantile_state1" + sql "DROP TABLE IF EXISTS ${tbName}" + sql """ CREATE TABLE IF NOT EXISTS ${tbName} ( k int, v QUANTILE_STATE ) UNIQUE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1", "enable_unique_key_merge_on_write" = "${enable_mow}"); """ - def result = sql "show create table ${tbName}" - logger.info("${result}") - assertTrue(result.toString().containsIgnoreCase('`v` QUANTILE_STATE NOT NULL')) + def result = sql "show create table ${tbName}" + logger.info("${result}") + assertTrue(result.toString().containsIgnoreCase('`v` QUANTILE_STATE NOT NULL')) - 
def tbNameAgg = "test_unique_quantile_state_agg1" - sql "DROP TABLE IF EXISTS ${tbNameAgg}" - sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v QUANTILE_STATE QUANTILE_UNION NOT NULL ) AGGREGATE KEY(k) + def tbNameAgg = "test_unique_quantile_state_agg1" + sql "DROP TABLE IF EXISTS ${tbNameAgg}" + sql """ CREATE TABLE IF NOT EXISTS ${tbNameAgg} ( k int, v QUANTILE_STATE QUANTILE_UNION NOT NULL ) AGGREGATE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - sql """ insert into ${tbNameAgg} values + sql """ insert into ${tbNameAgg} values (1,to_quantile_state(-1, 2048)),(2,to_quantile_state(0, 2048)), (2,to_quantile_state(1, 2048)),(3,to_quantile_state(0, 2048)), (3,to_quantile_state(1, 2048)),(3,to_quantile_state(2, 2048));""" - qt_sql "select k, quantile_percent(v, 0), quantile_percent(v, 0.5), quantile_percent(v, 1) from ${tbNameAgg} order by k;" + qt_sql "select k, quantile_percent(v, 0), quantile_percent(v, 0.5), quantile_percent(v, 1) from ${tbNameAgg} order by k;" - // 1. insert from aggregate table - sql "insert into ${tbName} select * from ${tbNameAgg};" - qt_from_agg "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" - // 2. insert into values - sql """ insert into ${tbName} values (1, to_quantile_state(-2, 2048)), (1, to_quantile_state(0, 2048)), (2, to_quantile_state(-100, 2048));""" - qt_from_values "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" - qt_from_values """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 + // 1. insert from aggregate table + sql "insert into ${tbName} select * from ${tbNameAgg};" + qt_from_agg "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" + // 2. insert into values + sql """ insert into ${tbName} values (1, to_quantile_state(-2, 2048)), (1, to_quantile_state(0, 2048)), (2, to_quantile_state(-100, 2048));""" + qt_from_values "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" + qt_from_values """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 from ${tbName} group by k order by k, c1, c2, c3; """ - // 3. insert from UNIQUE table - sql "insert into ${tbName} select * from ${tbName};" - qt_from_uniq "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" - qt_from_uniq """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 + // 3. 
insert from UNIQUE table + sql "insert into ${tbName} select * from ${tbName};" + qt_from_uniq "select k, quantile_percent(v, 0) c1, quantile_percent(v, 0.5) c2, quantile_percent(v, 1) c3 from ${tbName} order by k, c1, c2, c3;" + qt_from_uniq """ select k, quantile_percent(QUANTILE_UNION(v), 0) c1, quantile_percent(QUANTILE_UNION(v), 0.5) c2, quantile_percent(QUANTILE_UNION(v), 1) c3 from ${tbName} group by k order by k, c1, c2, c3; """ - sql "DROP TABLE IF EXISTS ${tbName};" - sql "DROP TABLE IF EXISTS ${tbNameAgg};" + sql "DROP TABLE IF EXISTS ${tbName};" + sql "DROP TABLE IF EXISTS ${tbNameAgg};" - tbName = "test_unique_quantile_state3" - sql "DROP TABLE IF EXISTS ${tbName}" - test { - sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k QUANTILE_STATE, v int) UNIQUE KEY(k) + tbName = "test_unique_quantile_state3" + sql "DROP TABLE IF EXISTS ${tbName}" + test { + sql """ CREATE TABLE IF NOT EXISTS ${tbName} (k QUANTILE_STATE, v int) UNIQUE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1"); """ - exception "Key column can not set complex type:k" - } + exception "Key column can not set complex type:k" } } } diff --git a/regression-test/suites/data_model_p0/unique/test_unique_table_auto_inc.groovy b/regression-test/suites/data_model_p0/unique/test_unique_table_auto_inc.groovy index 37387508ce0527..10a5444a8e5be8 100644 --- a/regression-test/suites/data_model_p0/unique/test_unique_table_auto_inc.groovy +++ b/regression-test/suites/data_model_p0/unique/test_unique_table_auto_inc.groovy @@ -380,16 +380,8 @@ suite("test_unique_table_auto_inc") { ,(3,'EUROPE','ly final courts cajole furiously final excuse') ,(4,'MIDDLE EAST','uickly special accounts cajole carefully blithely close requests. carefully final asymptotes haggle furiousl');""" qt_sql "select * from ${table11} order by r_regionkey;" - sql 'set enable_nereids_planner=true' - sql "set experimental_enable_nereids_planner=true;" - sql 'set enable_nereids_dml=true' sql "update ${table11} set r_comment = 'foobar' where r_regionkey <= 10;" qt_sql "select * from ${table11} order by r_regionkey;" - - sql 'set enable_nereids_planner=false' - sql "set experimental_enable_nereids_planner=false;" - sql "update ${table11} set r_comment = 'barfoo' where r_regionkey <= 10;" - qt_sql "select * from ${table11} order by r_regionkey;" sql "drop table if exists ${table11};" @@ -413,15 +405,8 @@ suite("test_unique_table_auto_inc") { ,(3,'EUROPE','ly final courts cajole furiously final excuse') ,(4,'MIDDLE EAST','uickly special accounts cajole carefully blithely close requests. 
carefully final asymptotes haggle furiousl');""" qt_sql "select * from ${table12} order by r_regionkey;" - sql 'set enable_nereids_planner=true' - sql "set experimental_enable_nereids_planner=true;" - sql 'set enable_nereids_dml=true' sql """insert into ${table12} select r_regionkey, "test1", "test2" from ${table12} where r_regionkey=3;""" qt_sql "select * from ${table12} order by r_regionkey;" - sql 'set enable_nereids_planner=false' - sql "set experimental_enable_nereids_planner=false;" - sql """insert into ${table12} select r_regionkey, "test3", "test4" from ${table12} where r_regionkey=4;""" - qt_sql "select * from ${table12} order by r_regionkey;" sql "drop table if exists ${table12};" diff --git a/regression-test/suites/datatype_p0/datetimev2/test_timezone.groovy b/regression-test/suites/datatype_p0/datetimev2/test_timezone.groovy index 5c16b37802c5b3..746a78de4417b3 100644 --- a/regression-test/suites/datatype_p0/datetimev2/test_timezone.groovy +++ b/regression-test/suites/datatype_p0/datetimev2/test_timezone.groovy @@ -31,27 +31,8 @@ suite("test_timezone") { sql """ set time_zone = '+02:00' """ - sql """ set enable_nereids_planner = false """ - if (isGroupCommitMode()) { - sql """ set enable_nereids_planner = true """ - } - sql """insert into test_timezone values('2022-01-01 01:02:55', '2022-01-01 01:02:55.123')""" - sql """insert into test_timezone values('2022-02-01 01:02:55Z', '2022-02-01 01:02:55.123Z')""" - sql """insert into test_timezone values('2022-03-01 01:02:55+08:00', '2022-03-01 01:02:55.123UTC')""" - sql """insert into test_timezone values('2022-04-01T01:02:55-06:00', '2022-04-01T01:02:55.123+06:00')""" - sql """insert into test_timezone values('2022-05-01 01:02:55+02:30', '2022-05-01 01:02:55.123-02:30')""" - sql """insert into test_timezone values('2022-06-01T01:02:55+04:30', '2022-06-01 01:02:55.123-07:30')""" - sql """insert into test_timezone values('20220701010255+07:00', '20220701010255-05:00')""" - if (isGroupCommitMode()) { - sql """insert into test_timezone values('2022-07-31 21:00', '2022-08-01')""" - } else { - sql """insert into test_timezone values('20220801+05:00', '20220801America/Argentina/Buenos_Aires')""" - } - qt_legacy "select * from test_timezone order by k1" - sql """ truncate table test_timezone """ - sql """ set enable_nereids_planner = true """ sql """insert into test_timezone values('2022-01-01 01:02:55', '2022-01-01 01:02:55.123')""" sql """insert into test_timezone values('2022-02-01 01:02:55Z', '2022-02-01 01:02:55.123Z')""" sql """insert into test_timezone values('2022-05-01 01:02:55+02:30', '2022-05-01 01:02:55.123-02:30')""" diff --git a/regression-test/suites/datatype_p0/decimalv3/test_decimalv3.groovy b/regression-test/suites/datatype_p0/decimalv3/test_decimalv3.groovy index 45d1d27e584c65..d0b289ef15a51b 100644 --- a/regression-test/suites/datatype_p0/decimalv3/test_decimalv3.groovy +++ b/regression-test/suites/datatype_p0/decimalv3/test_decimalv3.groovy @@ -50,7 +50,6 @@ suite("test_decimalv3") { qt_decimalv3_6 "select * from test_decimal256 where v1 <= 9999999999999999999999999999999999999999999999999999999999999999999999.999999 order by k1, v1; " */ - sql "set experimental_enable_nereids_planner =false;" qt_aEb_test1 "select 0e0;" qt_aEb_test2 "select 1e-1" qt_aEb_test3 "select -1e-2" @@ -58,16 +57,11 @@ suite("test_decimalv3") { qt_aEb_test5 "select 123456789e-10" qt_aEb_test6 "select 0.123445e10;" - sql "set enable_nereids_planner = true;" sql "set enable_decimal256 = true;" qt_decimal256_cast_0 """ select cast("999999.999999" 
as decimal(76,6));""" qt_decimal256_cast_1 """select cast("9999999999999999999999999999999999999999999999999999999999999999999999.999999" as decimal(76,6));""" // test const - - // nereids - sql "set enable_nereids_planner = true;" - sql "set enable_decimal256 = true;" qt_decimal256_const_0 "select 1.4E-45;" qt_decimal256_const_1 "select 1.4E-80;" @@ -82,16 +76,6 @@ suite("test_decimalv3") { qt_decimal256_const_6 "select 1.4E-45;" qt_decimal256_const_7 "select 1.4E-80;" - // not nereids - sql "set enable_nereids_planner = false;" - sql "set enable_decimal256 = true;" - qt_decimal256_const_8 "select 1.4E-45;" - qt_decimal256_const_9 "select 1.4E-80;" - sql "set enable_decimal256 = false;" - qt_decimal256_const_10 "select 1.4E-45;" - qt_decimal256_const_11 "select 1.4E-80;" - - sql "set enable_nereids_planner = true;" sql "set enable_decimal256 = true;" sql "drop table if exists test_decimal256_cast_str;" sql """ create table test_decimal256_cast_str(k1 int, v1 char(128)) diff --git a/regression-test/suites/datatype_p0/decimalv3/test_load.groovy b/regression-test/suites/datatype_p0/decimalv3/test_load.groovy index 013e524412554a..d1dc38ca342189 100644 --- a/regression-test/suites/datatype_p0/decimalv3/test_load.groovy +++ b/regression-test/suites/datatype_p0/decimalv3/test_load.groovy @@ -131,7 +131,6 @@ suite("test_load") { select * from test_sys_update_basic_test_update_decimal_tb order by 1, 2; """ - sql """ set enable_nereids_planner=false; """ sql """ drop TABLE if exists test_sys_update_basic_test_update_decimal_tb; """ diff --git a/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_functions_array_with_const.groovy b/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_functions_array_with_const.groovy index 087054d35e25e1..98690b35493853 100644 --- a/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_functions_array_with_const.groovy +++ b/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_functions_array_with_const.groovy @@ -16,39 +16,6 @@ // under the License. suite("test_array_functions_array_with_const", "p0") { - sql "set enable_nereids_planner=false;" - //array_with_constant - qt_old_sql "SELECT 'array_with_constant';" - order_qt_old_sql "SELECT array_with_constant(3, number) FROM numbers limit 10;" - order_qt_old_sql "SELECT array_with_constant(number, 'Hello') FROM numbers limit 10;" - // not support const expression -// order_qt_sql "SELECT array_with_constant(number % 3, number % 2 ? 
'Hello' : NULL) FROM numbers limit 10;" - // mistake:No matching function with signature: array_with_constant(INT, ARRAY) -// order_qt_sql "SELECT array_with_constant(number, []) FROM numbers limit 10;" - order_qt_old_sql "SELECT array_with_constant(2, 'qwerty'), array_with_constant(0, -1), array_with_constant(1, 1);" - // -- { serverError } - try { - sql """ - SELECT array_with_constant(-231.37104, -138); - """ - } catch (Exception ex) { - assertTrue(ex.getMessage().contains("Array size should in range(0, 1000000) in function")) - } - - // -- {server for large array} - try { - sql """ - SELECT array_with_constant(1000001, 1); - """ - } catch (Exception ex) { - assertTrue(ex.getMessage().contains("Array size should in range(0, 1000000) in function")) - } - - - // nereids - sql "set enable_nereids_planner=true;" - sql "set enable_fallback_to_original_planner=false;" - //array_with_constant qt_nereid_sql "SELECT 'array_with_constant';" order_qt_nereid_sql "SELECT array_with_constant(3, number) FROM numbers limit 10;" diff --git a/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.groovy b/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.groovy index 7ec3942c331f25..82ed0c8a73c956 100644 --- a/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.groovy +++ b/regression-test/suites/datatype_p0/nested_types/query/array_functions/test_array_zip_array_enumerate_uniq.groovy @@ -16,30 +16,24 @@ // under the License. suite("test_array_zip_array_enumerate_uniq", "p0") { - sql "set enable_nereids_planner=false;" // ========== array-zip ========== // wrong case - try { - sql """ - SELECT array_zip(); - """ - } catch (Exception ex) { - assertTrue(ex.getMessage().contains("errCode = 2, detailMessage =")) + test { + sql """ SELECT array_zip() """ + exception "errCode = 2, detailMessage =" } - try { - sql """ - SELECT array_zip(['a', 'b', 'c'], ['d', 'e', 'f', 'd']); - """ - } catch (Exception ex) { - assertTrue(ex.getMessage().contains("function array_zip's 2-th argument should have same offsets with first argument")) + test { + sql """ SELECT array_zip(['a', 'b', 'c'], ['d', 'e', 'f', 'd']) """ + exception """function array_zip's 2-th argument should have same offsets with first argument""" } + // nereid not support array_enumerate_uniq // ============= array_enumerate_uniq ========= qt_sql "SELECT 'array_enumerate_uniq';" - order_qt_old_sql """ SELECT array_enumerate_uniq(array_enumerate_uniq(array(cast(10 as LargeInt), cast(100 as LargeInt), cast(2 as LargeInt))), array(cast(123 as LargeInt), cast(1023 as LargeInt), cast(123 as LargeInt))); """ + order_qt_nereid_sql """ SELECT array_enumerate_uniq(array_enumerate_uniq(array(cast(10 as LargeInt), cast(100 as LargeInt), cast(2 as LargeInt))), array(cast(123 as LargeInt), cast(1023 as LargeInt), cast(123 as LargeInt))); """ - order_qt_old_sql """SELECT array_enumerate_uniq( + order_qt_nereid_sql """SELECT array_enumerate_uniq( [111111, 222222, 333333], [444444, 555555, 666666], [111111, 222222, 333333], @@ -48,8 +42,8 @@ suite("test_array_zip_array_enumerate_uniq", "p0") { [444444, 555555, 666666], [111111, 222222, 333333], [444444, 555555, 666666]);""" - order_qt_old_sql """SELECT array_enumerate_uniq(array(STDDEV_SAMP(910947.571364)), array(NULL)) from numbers;""" - //order_qt_sql """ SELECT max(array_join(arr)) FROM (SELECT array_enumerate_uniq(group_array(DIV(number, 54321)) AS nums, 
group_array(cast(DIV(number, 98765) as string))) AS arr FROM (SELECT number FROM numbers LIMIT 1000000) GROUP BY bitmap_hash(number) % 100000);""" + order_qt_nereid_sql """SELECT array_enumerate_uniq(array(STDDEV_SAMP(910947.571364)), array(NULL)) from numbers;""" +// //order_qt_sql """ SELECT max(array_join(arr)) FROM (SELECT array_enumerate_uniq(group_array(DIV(number, 54321)) AS nums, group_array(cast(DIV(number, 98765) as string))) AS arr FROM (SELECT number FROM numbers LIMIT 1000000) GROUP BY bitmap_hash(number) % 100000);""" sql """ DROP TABLE IF EXISTS ARRAY_BIGINT_DATA;""" sql """ CREATE TABLE IF NOT EXISTS `ARRAY_BIGINT_DATA` ( @@ -64,50 +58,6 @@ suite("test_array_zip_array_enumerate_uniq", "p0") { sql """ INSERT INTO ARRAY_BIGINT_DATA VALUES (0, [-1, 0, 1, 2, -9223372036854775808, 9223372036854775807, 1]);""" sql """ INSERT INTO ARRAY_BIGINT_DATA VALUES (1, []);""" - test { - sql """ select array_enumerate_uniq((select data from ARRAY_BIGINT_DATA where id = 0), (select data from ARRAY_BIGINT_DATA where id = 1), (select data from ARRAY_BIGINT_DATA where id = 1));""" - exception ("A subquery should not return Array/Map/Struct type") - } - - - // nereids - sql "set enable_nereids_planner=true;" - sql "set enable_fallback_to_original_planner=false;" -// ========== array-zip ========== -// wrong case - try { - sql """ - SELECT array_zip(); - """ - } catch (Exception ex) { - assertTrue(ex.getMessage().size() > 0) - } - - try { - sql """ - SELECT array_zip(['a', 'b', 'c'], ['d', 'e', 'f', 'd']); - """ - } catch (Exception ex) { - assertTrue(ex.getMessage().contains("function array_zip's 2-th argument should have same offsets with first argument")) - } - - // nereid not support array_enumerate_uniq - // ============= array_enumerate_uniq ========= - qt_sql "SELECT 'array_enumerate_uniq';" - order_qt_nereid_sql """ SELECT array_enumerate_uniq(array_enumerate_uniq(array(cast(10 as LargeInt), cast(100 as LargeInt), cast(2 as LargeInt))), array(cast(123 as LargeInt), cast(1023 as LargeInt), cast(123 as LargeInt))); """ - - order_qt_nereid_sql """SELECT array_enumerate_uniq( - [111111, 222222, 333333], - [444444, 555555, 666666], - [111111, 222222, 333333], - [444444, 555555, 666666], - [111111, 222222, 333333], - [444444, 555555, 666666], - [111111, 222222, 333333], - [444444, 555555, 666666]);""" - order_qt_nereid_sql """SELECT array_enumerate_uniq(array(STDDEV_SAMP(910947.571364)), array(NULL)) from numbers;""" -// //order_qt_sql """ SELECT max(array_join(arr)) FROM (SELECT array_enumerate_uniq(group_array(DIV(number, 54321)) AS nums, group_array(cast(DIV(number, 98765) as string))) AS arr FROM (SELECT number FROM numbers LIMIT 1000000) GROUP BY bitmap_hash(number) % 100000);""" - test { sql """ select array_enumerate_uniq((select data from ARRAY_BIGINT_DATA where id = 0), (select data from ARRAY_BIGINT_DATA where id = 1), (select data from ARRAY_BIGINT_DATA where id = 1));""" exception ("lengths of all arrays of function array_enumerate_uniq must be equal") diff --git a/regression-test/suites/datatype_p0/nested_types/query/map_functions/test_map_with_agg.groovy b/regression-test/suites/datatype_p0/nested_types/query/map_functions/test_map_with_agg.groovy index eee045fd2ed48e..9a583bbfc8a156 100644 --- a/regression-test/suites/datatype_p0/nested_types/query/map_functions/test_map_with_agg.groovy +++ b/regression-test/suites/datatype_p0/nested_types/query/map_functions/test_map_with_agg.groovy @@ -39,13 +39,6 @@ suite("test_map_with_agg", "p0") { sql """ set parallel_pipeline_task_num=5; 
""" - // test in old planner - sql """set enable_nereids_planner=false""" - order_qt_old_sql """ SELECT id, c_char, map('exp_sea', 1) as m FROM t_map_count WHERE p1 = 'comr' AND p2 = 'ex' GROUP BY 1,2 - union all - SELECT id, c_char, map('exp_seac', count(CASE WHEN et = 'page_l' THEN uid END )) as m FROM t_map_count WHERE p1 = 'consumer-un' AND p2 = '17469174857s957ssf' GROUP BY 1,2;""" - - // test in nereids planner sql """set enable_nereids_planner=true""" sql """ set enable_fallback_to_original_planner=false""" diff --git a/regression-test/suites/datatype_p0/scalar_types/get_assignment_compatible_type.groovy b/regression-test/suites/datatype_p0/scalar_types/get_assignment_compatible_type.groovy index f982c01cda3714..020abf83d74f39 100644 --- a/regression-test/suites/datatype_p0/scalar_types/get_assignment_compatible_type.groovy +++ b/regression-test/suites/datatype_p0/scalar_types/get_assignment_compatible_type.groovy @@ -16,7 +16,6 @@ // under the License. suite("get_assignment_compatible_type") { - sql 'set enable_nereids_planner=false' sql "drop table if exists test_decimal_boolean" sql """create table test_decimal_boolean ( id int, diff --git a/regression-test/suites/ddl_p0/test_create_or_replace_view.groovy b/regression-test/suites/ddl_p0/test_create_or_replace_view.groovy new file mode 100644 index 00000000000000..2ea993580e1061 --- /dev/null +++ b/regression-test/suites/ddl_p0/test_create_or_replace_view.groovy @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_create_or_replace_view") { + // create two test tables and insert some data + sql """DROP TABLE IF EXISTS test_create_or_replace_view_tbl1""" + sql """ + CREATE TABLE IF NOT EXISTS test_create_or_replace_view_tbl1 + (k1 int, k2 int, v int) + DUPLICATE KEY(k1) DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES( "replication_num" = "1"); + """ + sql """DROP TABLE IF EXISTS test_create_or_replace_view_tbl2""" + sql """ + CREATE TABLE IF NOT EXISTS test_create_or_replace_view_tbl2 + (k1 int, k2 int, v int) + DUPLICATE KEY(k1) DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES( "replication_num" = "1"); + """ + sql """INSERT INTO test_create_or_replace_view_tbl1 VALUES(1,1,1)""" + sql """INSERT INTO test_create_or_replace_view_tbl2 VALUES(2,2,2)""" + sql "sync" + + // create view + sql "drop view if exists view_test_create_or_replace_view" + sql """ + CREATE VIEW IF NOT EXISTS view_test_create_or_replace_view + AS SELECT * FROM test_create_or_replace_view_tbl1; + """ + qt_sql_1 """select * from view_test_create_or_replace_view""" + + sql """ + CREATE OR REPLACE VIEW view_test_create_or_replace_view + AS SELECT * FROM test_create_or_replace_view_tbl2; + """ + qt_sql_2 """select * from view_test_create_or_replace_view""" + test { + sql """ + CREATE OR REPLACE VIEW IF NOT EXISTS view_test_create_or_replace_view + AS SELECT * FROM test_create_or_replace_view_tbl1; + """ + exception "[OR REPLACE] and [IF NOT EXISTS] cannot used at the same time" + } + + sql """drop view if exists view_test_create_or_replace_view""" + sql """DROP TABLE IF EXISTS test_create_or_replace_view_tbl1""" + sql """DROP TABLE IF EXISTS test_create_or_replace_view_tbl2""" +} diff --git a/regression-test/suites/ddl_p0/test_create_table_generated_column/test_create_table_generated_column_legacy.groovy b/regression-test/suites/ddl_p0/test_create_table_generated_column/test_create_table_generated_column_legacy.groovy deleted file mode 100644 index bd6d3900585741..00000000000000 --- a/regression-test/suites/ddl_p0/test_create_table_generated_column/test_create_table_generated_column_legacy.groovy +++ /dev/null @@ -1,249 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_create_table_generated_column_legacy") { - // test legacy planner create - sql "SET enable_nereids_planner=false;" - sql "drop table if exists test_gen_col_common_legacy" - qt_common_default """create table test_gen_col_common_legacy(a int,b int,c double generated always as (abs(a+b)) not null) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - ;""" - sql "drop table if exists test_gen_col_without_generated_always_legacy" - qt_common_without_generated_always """create table test_gen_col_without_generated_always_legacy(a int,b int,c double as (abs(a+b)) not null) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - ;""" - sql "drop table if exists test_gen_col_in_middle_legacy" - qt_gencol_in_middle """create table test_gen_col_in_middle_legacy(a int,c double generated always as (abs(a+b)) not null,b int) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1");""" - sql "drop table if exists gencol_refer_gencol_legacy" - qt_gencol_refer_gencol """ - create table gencol_refer_gencol_legacy(a int,c double generated always as (abs(a+b)) not null,b int, d int generated always as(c+1)) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - sql "drop table if exists test_gen_col_array_func_legacy" - qt_gencol_array_function_create """ - create table test_gen_col_array_func_legacy(pk int,a array,b array, c array generated always as (array_union(a,b)) not null) - DISTRIBUTED BY HASH(pk) - PROPERTIES("replication_num" = "1"); - ; - """ - sql "drop table if exists test_gen_col_element_at_func_legacy" - qt_gencol_array_function_element_at_create """ - create table test_gen_col_element_at_func_legacy(pk int,a array,b array, c int generated always as (element_at(a, 1)) not null) - DISTRIBUTED BY HASH(pk) - PROPERTIES("replication_num" = "1"); - ; - """ - test { - sql """ - create table gencol_type_check(a int,b int, c array generated always as (abs(a+b,3)) not null) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "In generated column 'c', no matching function with signature" - } - - // gencol_has_sum - test { - sql """ - create table gencol_has_sum(a int,b int, c int generated always as (sum(a)) not null) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Expression of generated column 'c' contains a disallowed function" - } - - // gencol_has_column_not_define - test { - sql """ - create table gencol_has_sum(a int,b int, c int generated always as (abs(d)) not null) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Unknown column 'd' in 'generated column function'" - } - - // gencol_refer_gencol_after - test { - sql """ - create table gencol_refer_gencol_legacy(a int,c double generated always as (abs(a+d)) not null,b int, d int generated always as(c+1)) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Generated column can refer only to generated columns defined prior to it." - } - - sql "set @myvar=2" - // gencol_has_var - test { - sql """ - create table test_gen_col_not_null100(a varchar(10),c double generated always as (abs(a+b+@myvar)) not null,b int) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Generated column expression cannot contain variable." 
- } - - test { - sql """ - create table test_gen_col_auto_increment(a bigint not null auto_increment, b int, c int as (a*b)) - distributed by hash(a) properties("replication_num" = "1"); - """ - exception "Generated column 'c' cannot refer to auto-increment column." - } - - test{ - sql """ - create table test_gen_col_subquery(a int,b int, c int generated always as (a+(select 1)) not null) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Generated column does not support subquery." - } - - test { - sql """ - create table test_gen_col_array_func_lambda(pk int,a array,b array, c array generated always as (array_count(x->(x%2=0),b)) not null) - DISTRIBUTED BY HASH(pk) - PROPERTIES("replication_num" = "1"); - """ - exception "Generated column does not support lambda." - } - - test { - sql """ - create table test_gen_col_array_func_legacy(pk int,a array,b array, c double generated always as (a+b) not null) - DISTRIBUTED BY HASH(pk) - PROPERTIES("replication_num" = "1"); - """ - exception "In generated column 'c', can not cast from origin type" - } - - test { - sql """ - create table test_window_func(a int default 10, b int default 100, c boolean as(rank() over())) DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1");""" - exception "Expression of generated column 'c' contains a disallowed expression:'rank() OVER ()'" - } - test { - sql """ - create table test_grouping(a int default 10, b int default 100, c boolean as(grouping(a))) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1");""" - exception "Expression of generated column 'c' contains a disallowed function:'grouping'" - } - - sql "SET enable_nereids_planner=true;" - sql "SET enable_fallback_to_original_planner=false;" - qt_common_default_insert "INSERT INTO test_gen_col_common_legacy values(6,7,default);" - qt_common_default_insert_with_specific_column "INSERT INTO test_gen_col_common_legacy(a,b) values(1,2);" - qt_common_default_test_insert_default "INSERT INTO test_gen_col_common_legacy values(3,5,default);" - qt_commont_default_select "select * from test_gen_col_common_legacy order by 1,2,3;" - - // qt_common_default_test_insert_null - test { - sql "INSERT INTO test_gen_col_common_legacy(a,b) values(1,null);" - def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode" - exception exception_str - } - - // qt_common_default_test_insert_gencol - test { - sql "INSERT INTO test_gen_col_common_legacy values(1,2,3);" - exception "The value specified for generated column 'c' in table 'test_gen_col_common_legacy' is not allowed." 
- } - - - qt_common_without_generated_always_insert "INSERT INTO test_gen_col_without_generated_always_legacy values(6,7,default);" - qt_common_without_generated_always_insert_with_specific_column "INSERT INTO test_gen_col_without_generated_always_legacy(a,b) values(1,2);" - qt_commont_without_generated_always_select "select * from test_gen_col_without_generated_always_legacy order by 1,2,3;" - - - qt_gencol_in_middle_insert "insert into test_gen_col_in_middle_legacy values(1,default,5);" - qt_gencol_in_middle_insert_with_specific_column "insert into test_gen_col_in_middle_legacy(a,b) values(4,5);" - qt_gencol_in_middle_insert_with_specific_column_2 "insert into test_gen_col_in_middle_legacy(a,b,c) values(1,6,default);" - qt_gencol_in_middle_select "select * from test_gen_col_in_middle_legacy order by 1,2,3;" - - - qt_gencol_refer_gencol_insert "insert into gencol_refer_gencol_legacy values(1,default,5,default);" - qt_gencol_refer_gencol_insert2 "insert into gencol_refer_gencol_legacy(a,b) values(5,6);" - qt_gencol_refer_gencol_insert3 "insert into gencol_refer_gencol_legacy(a,b,c) values(2,9,default);" - qt_gencol_refer_gencol_insert4 "insert into gencol_refer_gencol_legacy(a,b,c,d) values(3,3,default,default);" - qt_gencol_refer_gencol_select "select * from gencol_refer_gencol_legacy order by 1,2,3,4;" - - - qt_gencol_array_function_insert "insert into test_gen_col_array_func_legacy values(1,[1,2],[3,2],default);" - qt_gencol_array_function_select "select * from test_gen_col_array_func_legacy" - - - qt_gencol_array_function_element_at_insert "insert into test_gen_col_element_at_func_legacy values(1,[1,2],[3,2],default);" - qt_gencol_array_function_element_at_select "select * from test_gen_col_element_at_func_legacy" - - test { - sql """ - create table test_gen_col_aggregate(a int,b int,c int sum generated always as (abs(a+1)) not null) - aggregate key(a,b) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "The generated columns can be key columns, or value columns of replace and replace_if_not_null aggregation type." - } - - // test drop dependency - sql "drop table if exists gencol_refer_gencol_legacy" - qt_gencol_refer_gencol """ - create table gencol_refer_gencol_legacy(a int,c double generated always as (abs(a+b)) not null,b int, d int generated always as(c+1)) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - sql "insert into gencol_refer_gencol_legacy(a,b) values(3,4)" - test { - sql "alter table gencol_refer_gencol_legacy drop column a" - exception "Column 'a' has a generated column dependency on :[c]" - } - test { - sql "alter table gencol_refer_gencol_legacy drop column c" - exception "Column 'c' has a generated column dependency on :[d]" - } - sql "alter table gencol_refer_gencol_legacy drop column d" - sql "alter table gencol_refer_gencol_legacy drop column c" - sql "alter table gencol_refer_gencol_legacy drop column b" - qt_test_drop_column "select * from gencol_refer_gencol_legacy" - test { - sql """ - create table test_gen_col_default(a int,b int,c int generated always as (abs(a+1)) not null default 10) - aggregate key(a,b,c) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Generated columns cannot have default value." 
- } - test { - sql """ - create table test_gen_col_increment(a int,b int,c int generated always as (abs(a+1)) not null auto_increment) - aggregate key(a,b,c) - DISTRIBUTED BY HASH(a) - PROPERTIES("replication_num" = "1"); - """ - exception "Generated columns cannot be auto_increment." - } - -} \ No newline at end of file diff --git a/regression-test/suites/ddl_p0/test_create_table_generated_column/test_delete_generated_column.groovy b/regression-test/suites/ddl_p0/test_create_table_generated_column/test_delete_generated_column.groovy index 09b4b3dfaa2b81..8cbe723d929c65 100644 --- a/regression-test/suites/ddl_p0/test_create_table_generated_column/test_delete_generated_column.groovy +++ b/regression-test/suites/ddl_p0/test_create_table_generated_column/test_delete_generated_column.groovy @@ -45,25 +45,25 @@ suite("test_generated_column_delete") { insert into test_par_gen_col_unique values(1,2,default),(10,2,default),(2,22,default),(10,2,default); """ - qt_delete_where_gen_col "delete from test_par_gen_col partition p1 where c=3;" + sql "delete from test_par_gen_col partition p1 where c=3;" qt_delete_where_gen_col_select "select * from test_par_gen_col order by a,b,c;" - qt_delete_where_gen_col_partition_has_no_satisfied_row "delete from test_par_gen_col partition p1 where c=12;" + sql "delete from test_par_gen_col partition p1 where c=12;" qt_delete_where_gen_col_partition_has_no_satisfied_row_select "select * from test_par_gen_col order by a,b,c;;" - qt_delete_where_gen_col_and_other_col "delete from test_par_gen_col partition p2 where c=12 and a=10;" + sql "delete from test_par_gen_col partition p2 where c=12 and a=10;" qt_delete_where_gen_col_and_other_col_select "select * from test_par_gen_col order by a,b,c;;" - qt_delete_where_gen_col_unique "delete from test_par_gen_col_unique partition p1 where c=3;" + sql "delete from test_par_gen_col_unique partition p1 where c=3;" qt_delete_where_gen_col_select_unique "select * from test_par_gen_col_unique order by a,b,c;;" - qt_delete_where_gen_col_partition_has_no_satisfied_row_unique "delete from test_par_gen_col_unique partition p1 where c=12;" + sql "delete from test_par_gen_col_unique partition p1 where c=12;" qt_delete_where_gen_col_partition_has_no_satisfied_row_select_unique "select * from test_par_gen_col_unique order by a,b,c;;" - qt_delete_where_gen_col_and_other_col_unique "delete from test_par_gen_col_unique partition p2 where c=12 and a=10;" + sql "delete from test_par_gen_col_unique partition p2 where c=12 and a=10;" qt_delete_where_gen_col_and_other_col_select_unique "select * from test_par_gen_col_unique order by a,b,c;" - qt_delete_query """delete from test_par_gen_col_unique t1 using test_par_gen_col t2 inner join test_par_gen_col t3 + sql """delete from test_par_gen_col_unique t1 using test_par_gen_col t2 inner join test_par_gen_col t3 on t2.b=t3.b where t1.c=t2.c and t1.b=t2.b""" qt_delete_query_select "select * from test_par_gen_col_unique order by a,b,c;" sql "insert into test_par_gen_col_unique values(1,2,default),(10,2,default),(2,22,default),(10,2,default);" - qt_delete_query_cte """ + sql """ with cte as( select t2.* from test_par_gen_col t2 inner join test_par_gen_col t3 on t2.b=t3.b diff --git a/regression-test/suites/ddl_p0/test_create_table_like_nereids.groovy b/regression-test/suites/ddl_p0/test_create_table_like_nereids.groovy index e6ca0b696ffb88..a371f5ac051ea8 100644 --- a/regression-test/suites/ddl_p0/test_create_table_like_nereids.groovy +++ 
b/regression-test/suites/ddl_p0/test_create_table_like_nereids.groovy @@ -46,7 +46,8 @@ suite("test_create_table_like_nereids") { // with all rollup sql "drop table if exists table_like_with_roll_up" sql "CREATE TABLE table_like_with_roll_up LIKE mal_test_create_table_like with rollup;" - waitForRollUpJob("mal_test_create_table_like", 5000, 2) + waitForRollUpJob("mal_test_create_table_like", "r1", 60000) + waitForRollUpJob("mal_test_create_table_like", "r2", 60000) explain { sql ("select sum(a) from table_like_with_roll_up group by a") contains "ru1" @@ -59,7 +60,7 @@ suite("test_create_table_like_nereids") { // with partial rollup sql "drop table if exists table_like_with_partial_roll_up;" sql "CREATE TABLE table_like_with_partial_roll_up LIKE mal_test_create_table_like with rollup (ru1);" - waitForRollUpJob("mal_test_create_table_like", 5000, 2) + waitForRollUpJob("mal_test_create_table_like", "r1", 60000) sql "select * from table_like_with_partial_roll_up order by pk, a, b" explain { sql("select sum(a) from table_like_with_partial_roll_up group by a") @@ -78,7 +79,7 @@ suite("test_create_table_like_nereids") { sql "drop table if exists table_like_with_partial_roll_up_exists" sql """CREATE TABLE if not exists table_like_with_partial_roll_up_exists LIKE mal_test_create_table_like with rollup (ru1);""" - waitForRollUpJob("mal_test_create_table_like", 5000, 2) + waitForRollUpJob("mal_test_create_table_like", "r1", 60000) sql "drop table if exists test_create_table_like_char_255" sql """ diff --git a/regression-test/suites/ddl_p0/test_create_table_without_distribution.groovy b/regression-test/suites/ddl_p0/test_create_table_without_distribution.groovy index 655b2f82be5898..86f314bb808c33 100644 --- a/regression-test/suites/ddl_p0/test_create_table_without_distribution.groovy +++ b/regression-test/suites/ddl_p0/test_create_table_without_distribution.groovy @@ -29,7 +29,6 @@ suite("test_create_table_without_distribution") { def res1 = sql "show create table test_create_table_without_distribution;" mustContain(res1[0][1], "DISTRIBUTED BY RANDOM BUCKETS AUTO") - sql "SET enable_nereids_planner=false;" multi_sql """ drop table if exists test_create_table_without_distribution; create table test_create_table_without_distribution(a int, b int) properties ("replication_num"="1") diff --git a/regression-test/suites/ddl_p0/test_create_view.groovy b/regression-test/suites/ddl_p0/test_create_view.groovy deleted file mode 100644 index 0cc42f8fcc1616..00000000000000 --- a/regression-test/suites/ddl_p0/test_create_view.groovy +++ /dev/null @@ -1,237 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_create_view") { - sql "SET enable_nereids_planner=false" - sql """DROP TABLE IF EXISTS count_distinct""" - sql """ - CREATE TABLE IF NOT EXISTS count_distinct - ( - RQ DATE NOT NULL COMMENT "日期", - v1 VARCHAR(100) NOT NULL COMMENT "字段1", - v2 VARCHAR(100) NOT NULL COMMENT "字段2", - v3 VARCHAR(100) REPLACE_IF_NOT_NULL COMMENT "字段3" - ) - AGGREGATE KEY(RQ,v1,v2) - PARTITION BY RANGE(RQ) - ( - PARTITION p20220908 VALUES LESS THAN ('2022-09-09') - ) - DISTRIBUTED BY HASH(v1,v2) BUCKETS 3 - PROPERTIES( - "replication_num" = "1", - "dynamic_partition.enable" = "true", - "dynamic_partition.time_unit" = "DAY", - "dynamic_partition.start" = "-3", - "dynamic_partition.end" = "3", - "dynamic_partition.prefix" = "p", - "dynamic_partition.buckets" = "3" - ); - """ - sql "drop view if exists test_count_distinct" - sql """ - CREATE VIEW IF NOT EXISTS test_count_distinct - ( - RQ comment "日期", - v1 comment "v1", - v2 comment "v2", - v3 comment "v3" - ) - AS - select aa.RQ as RQ, aa.v1 as v1,aa.v2 as v2 , bb.v3 as v3 from - ( - select RQ, count(distinct v1) as v1 , count(distinct v2 ) as v2 - from count_distinct - group by RQ - ) aa - LEFT JOIN - ( - select RQ, max(v3) as v3 - from count_distinct - group by RQ - ) bb - on aa.RQ = bb.RQ; - """ - - sql """select * from test_count_distinct""" - sql """DROP VIEW IF EXISTS test_count_distinct""" - sql """DROP TABLE IF EXISTS count_distinct""" - - sql """DROP TABLE IF EXISTS t1""" - sql """ - CREATE TABLE `t1` ( - k1 int, - k2 date, - v1 int - ) ENGINE=OLAP - UNIQUE KEY(`k1`,`k2`) - COMMENT '测试' - PARTITION BY RANGE(k2) ( - PARTITION p1 VALUES [('2023-07-01'), ('2023-07-10')), - PARTITION p2 VALUES [('2023-07-11'), ('2023-07-20')) - ) - DISTRIBUTED BY HASH(`k1`) BUCKETS 3 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - );""" - sql """DROP TABLE IF EXISTS t2""" - sql """ - CREATE TABLE `t2` ( - k1 int, - k2 date, - v1 int - ) ENGINE=OLAP - UNIQUE KEY(`k1`,`k2`) - COMMENT '测试' - PARTITION BY RANGE(k2) ( - PARTITION p1 VALUES [('2023-07-01'), ('2023-07-05')), - PARTITION p2 VALUES [('2023-07-05'), ('2023-07-15')) - ) - DISTRIBUTED BY HASH(`k1`) BUCKETS 3 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ); """ - sql """ - CREATE VIEW IF NOT EXISTS my_view AS - SELECT t1.* FROM t1 PARTITION(p1) JOIN t2 PARTITION(p2) ON t1.k1 = t2.k1; """ - sql """SELECT * FROM my_view""" - sql """DROP VIEW IF EXISTS my_view""" - sql """DROP TABLE IF EXISTS t1""" - sql """DROP TABLE IF EXISTS t2""" - - - sql """DROP TABLE IF EXISTS view_baseall""" - sql """DROP VIEW IF EXISTS test_view7""" - sql """DROP VIEW IF EXISTS test_view8""" - sql """ - CREATE TABLE `view_baseall` ( - `k1` int(11) NULL, - `k3` array NULL - ) ENGINE=OLAP - DUPLICATE KEY(`k1`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`k1`) BUCKETS 5 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "is_being_synced" = "false", - "storage_format" = "V2", - "light_schema_change" = "true", - "disable_auto_compaction" = "false", - "enable_single_replica_compaction" = "false" - ); - """ - sql """insert into view_baseall values(1,[1,2,3]);""" - sql """insert into view_baseall values(2,[10,-2,8]);""" - sql """insert into view_baseall values(3,[-1,20,0]);""" - - qt_test_view_1 """ select * from view_baseall order by k1; """ - qt_test_view_2 """ select *, array_map(x->x>0,k3) from view_baseall order by k1; """ - qt_test_view_3 """ select *, array_filter(x->x>0,k3),array_filter(`k3`, array_map(x -> x > 0, `k3`)) from view_baseall order by k1; """ - - - sql 
""" - create view IF NOT EXISTS test_view7 (k1,k2,k3,k4) as - select *, array_filter(x->x>0,k3),array_filter(`k3`, array_map(x -> x > 0, `k3`)) from view_baseall order by k1; - """ - qt_test_view_4 """ select * from test_view7 order by k1; """ - - sql """ - create view IF NOT EXISTS test_view8 (k1,k2,k3) as - select *, array_map(x->x>0,k3) from view_baseall order by k1; - """ - qt_test_view_5 """ select * from test_view8 order by k1; """ - - sql """DROP TABLE IF EXISTS view_column_name_test""" - sql """ - CREATE TABLE IF NOT EXISTS view_column_name_test - ( - `timestamp` DATE NOT NULL COMMENT "['0000-01-01', '9999-12-31']", - `type` TINYINT NOT NULL COMMENT "[-128, 127]", - `error_code` INT COMMENT "[-2147483648, 2147483647]", - `error_msg` VARCHAR(300) COMMENT "[1-65533]", - `op_id` BIGINT COMMENT "[-9223372036854775808, 9223372036854775807]", - `op_time` DATETIME COMMENT "['0000-01-01 00:00:00', '9999-12-31 23:59:59']", - `target` float COMMENT "4 字节", - `source` double COMMENT "8 字节", - `lost_cost` decimal(12,2) COMMENT "", - `remark` string COMMENT "1m size", - `op_userid` LARGEINT COMMENT "[-2^127 + 1 ~ 2^127 - 1]", - `plate` SMALLINT COMMENT "[-32768, 32767]", - `iscompleted` boolean COMMENT "true 或者 false" - ) - DISTRIBUTED BY HASH(`type`) BUCKETS 1 - PROPERTIES ('replication_num' = '1'); - """ - - sql """ - DROP VIEW IF EXISTS v1 - """ - sql """ - CREATE VIEW v1 AS - SELECT - error_code, - 1, - 'string', - now(), - dayofyear(op_time), - cast (source AS BIGINT), - min(`timestamp`) OVER ( - ORDER BY - op_time DESC ROWS BETWEEN UNBOUNDED PRECEDING - AND 1 FOLLOWING - ), - 1 > 2, - 2 + 3, - 1 IN (1, 2, 3, 4), - remark LIKE '%like', - CASE WHEN remark = 's' THEN 1 ELSE 2 END, - TRUE | FALSE - FROM - view_column_name_test - """ - qt_test_view_6 """ SHOW VIEW FROM view_column_name_test;""" - - sql """ drop view if exists xxx;""" - sql """CREATE VIEW - `xxx` COMMENT 'VIEW' AS - WITH - CHENGBENJIA AS ( - SELECT - RN - FROM - ( - SELECT - row_number() OVER ( - PARTITION BY `A`.`timestamp`, - `A`.`type` - ORDER BY - CAST( - concat( - CAST(`A`.`error_msg` AS VARCHAR(*)), - CAST(`A`.`remark` AS VARCHAR(*)) - ) AS INT - ) DESC NULLS LAST - ) AS `RN` - FROM - view_column_name_test A - ) A - ) - SELECT - * from CHENGBENJIA;""" - sql """select * from xxx;""" - sql """ drop view if exists xxx;""" -} diff --git a/regression-test/suites/delete_p0/fold_constant/fold_constant.groovy b/regression-test/suites/delete_p0/fold_constant/fold_constant.groovy index ea36b02166ce42..6294ba3421f9b4 100644 --- a/regression-test/suites/delete_p0/fold_constant/fold_constant.groovy +++ b/regression-test/suites/delete_p0/fold_constant/fold_constant.groovy @@ -62,7 +62,6 @@ suite("fold_constant") { qt_select "select 10.0/0, 0.0/10" sql """ set enable_fold_constant_by_be = true; """ - sql """ set experimental_enable_nereids_planner = false; """ qt_select_coalesce_bool "select coalesce(true);" qt_select_coalesce_tinyint "select coalesce(cast(1 as tinyint));" qt_select_coalesce_smallint "select coalesce(cast(1 as smallint))" diff --git a/regression-test/suites/delete_p0/test_delete.groovy b/regression-test/suites/delete_p0/test_delete.groovy index 0b8623e7f52911..8a66e763328709 100644 --- a/regression-test/suites/delete_p0/test_delete.groovy +++ b/regression-test/suites/delete_p0/test_delete.groovy @@ -449,7 +449,6 @@ suite("test_delete") { "replication_allocation" = "tag.location.default: 1" ); """ - sql "set experimental_enable_nereids_planner = false;" sql "delete from test3 where statistic_date >= 
date_sub('2024-01-16',INTERVAL 1 day);" sql "drop table if exists bi_acti_per_period_plan" @@ -485,7 +484,6 @@ suite("test_delete") { INSERT INTO bi_acti_per_period_plan (proj_id,proj_name,proj_start_date,proj_end_date,last_data_date,data_date,data_batch_num,la_sum_base_proj_id,sum_base_proj_id,today_date,count,count_type,bl_count) VALUES (4508,'建筑工程项目A','2023-05-30 00:00:00','2024-03-07 00:00:00','2023-06-01 00:00:00','2023-08-15 00:00:00','2024-01-31 00:00:00','4509','4509','2023-08-27 00:00:00',5,'plan',4); """ - sql "set experimental_enable_nereids_planner = false;" sql "set @data_batch_num='2024-01-31 00:00:00';" sql "delete from bi_acti_per_period_plan where data_batch_num =@data_batch_num; " diff --git a/regression-test/suites/delete_p0/test_delete_on_mor.groovy b/regression-test/suites/delete_p0/test_delete_on_mor.groovy index 0cd2af70a2cab0..b39642058d8cf8 100644 --- a/regression-test/suites/delete_p0/test_delete_on_mor.groovy +++ b/regression-test/suites/delete_p0/test_delete_on_mor.groovy @@ -19,20 +19,12 @@ suite("test_delete_on_mor") { String db = context.config.getDbNameByFile(context.file) sql "select 1;" // to create database - for (def use_nereids_planner : [false, true]) { - logger.info("current params: use_nereids_planner: ${use_nereids_planner}") - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql "use ${db};" - if (use_nereids_planner) { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" - def tableA = "test_delete_on_mor1" - sql """ DROP TABLE IF EXISTS ${tableA} """ - sql """ CREATE TABLE IF NOT EXISTS ${tableA} ( + def tableA = "test_delete_on_mor1" + sql """ DROP TABLE IF EXISTS ${tableA} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableA} ( `user_id` LARGEINT NOT NULL COMMENT "用户id", `username` VARCHAR(50) NOT NULL COMMENT "用户昵称", `city` VARCHAR(20) COMMENT "用户所在城市", @@ -45,9 +37,9 @@ suite("test_delete_on_mor") { "replication_allocation" = "tag.location.default: 1" );""" - def tableB = "test_delete_on_mor2" - sql """ DROP TABLE IF EXISTS ${tableB} """ - sql """ CREATE TABLE IF NOT EXISTS ${tableB} ( + def tableB = "test_delete_on_mor2" + sql """ DROP TABLE IF EXISTS ${tableB} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableB} ( `user_id` LARGEINT NOT NULL COMMENT "用户id", `username` VARCHAR(50) NOT NULL COMMENT "用户昵称", `city` VARCHAR(20) COMMENT "用户所在城市", @@ -60,7 +52,7 @@ suite("test_delete_on_mor") { "replication_allocation" = "tag.location.default: 1" );""" - sql """insert into ${tableA} values + sql """insert into ${tableA} values (10000,"u1","北京",19,1), (10000,"u1","北京",20,1), (10001,"u3","北京",30,0), @@ -69,7 +61,7 @@ suite("test_delete_on_mor") { (10004,"u6","重庆",35,1), (10004,"u7","重庆",35,1); """ - sql """insert into ${tableB} values + sql """insert into ${tableB} values (10000,"u1","北京",18,1), (10000,"u1","北京",20,1), (10001,"u3","北京",30,0), @@ -77,17 +69,17 @@ suite("test_delete_on_mor") { (10003,"u5","广州",32,0), (10004,"u6","深圳",35,1), (10004,"u7","深圳",35,1); """ - - sql "sync;" - qt_sql "select * from ${tableA} order by user_id;" - qt_sql "select * from ${tableB} order by user_id;" - sql """ DELETE FROM ${tableA} a USING ${tableB} b + + sql "sync;" + qt_sql "select * from ${tableA} order by user_id;" + qt_sql "select * from ${tableB} 
order by user_id;" + sql """ DELETE FROM ${tableA} a USING ${tableB} b WHERE a.user_id = b.user_id AND a.city = b.city and b.city = '北京' AND b.age = 20;""" - sql "sync;" - qt_sql "select * from ${tableA} order by user_id;" + sql "sync;" + qt_sql "select * from ${tableA} order by user_id;" - sql """DELETE from ${tableA} USING + sql """DELETE from ${tableA} USING ( SELECT a.user_id, a.city FROM ${tableA} a @@ -95,10 +87,9 @@ suite("test_delete_on_mor") { WHERE ${tableB}.city = '上海' AND ${tableB}.age = 20 ) AS matched_rows WHERE ${tableA}.user_id = matched_rows.user_id AND ${tableA}.city = matched_rows.city; """ - qt_sql "select * from ${tableA} order by user_id;" + qt_sql "select * from ${tableA} order by user_id;" - sql "DROP TABLE IF EXISTS ${tableA};" - sql "DROP TABLE IF EXISTS ${tableB};" - } + sql "DROP TABLE IF EXISTS ${tableA};" + sql "DROP TABLE IF EXISTS ${tableB};" } } diff --git a/regression-test/suites/delete_p0/test_delete_with_sync_mv.groovy b/regression-test/suites/delete_p0/test_delete_with_sync_mv.groovy index ad9fdb3f752332..0ddf48d2d4298b 100644 --- a/regression-test/suites/delete_p0/test_delete_with_sync_mv.groovy +++ b/regression-test/suites/delete_p0/test_delete_with_sync_mv.groovy @@ -72,5 +72,9 @@ suite("test_delete_with_sync_mv") { from test_delete_with_sync_mv; """) + explain { + sql """delete from test_delete_with_sync_mv where l_orderkey = 2""" + contains "IS_PARTIAL_UPDATE: false" + } sql """delete from test_delete_with_sync_mv where l_orderkey = 2""" } diff --git a/regression-test/suites/demo_p0/explain_action.groovy b/regression-test/suites/demo_p0/explain_action.groovy index 511b9509360a3f..3c0fb002f69196 100644 --- a/regression-test/suites/demo_p0/explain_action.groovy +++ b/regression-test/suites/demo_p0/explain_action.groovy @@ -17,15 +17,12 @@ suite("explain_action") { - // this case check explain, so we disable nereids - sql """set enable_nereids_planner=false""" - explain { sql("select 100") - // contains("OUTPUT EXPRS:\n 100\n") && contains("PARTITION: UNPARTITIONED\n") - contains "OUTPUT EXPRS:\n 100\n" - contains "PARTITION: UNPARTITIONED\n" + // contains("100") && contains("UNION") + contains "100" + contains "UNION" } explain { diff --git a/regression-test/suites/export_p2/test_export_max_file_size.groovy b/regression-test/suites/export_p2/test_export_max_file_size.groovy index a66ac35d026cce..a9e41ab94eaa95 100644 --- a/regression-test/suites/export_p2/test_export_max_file_size.groovy +++ b/regression-test/suites/export_p2/test_export_max_file_size.groovy @@ -20,10 +20,15 @@ suite("test_export_max_file_size", "p2") { sql """ set enable_nereids_planner=true """ sql """ set enable_fallback_to_original_planner=false """ - String nameNodeHost = context.config.otherConfigs.get("extHiveHmsHost") - String hdfsPort = context.config.otherConfigs.get("extHdfsPort") - String fs = "hdfs://${nameNodeHost}:${hdfsPort}" - String user_name = context.config.otherConfigs.get("extHiveHmsUser") + String dfsNameservices=context.config.otherConfigs.get("dfsNameservices") + String dfsHaNamenodesHdfsCluster=context.config.otherConfigs.get("dfsHaNamenodesHdfsCluster") + String dfsNamenodeRpcAddress1=context.config.otherConfigs.get("dfsNamenodeRpcAddress1") + String dfsNamenodeRpcAddress2=context.config.otherConfigs.get("dfsNamenodeRpcAddress2") + String dfsNamenodeRpcAddress3=context.config.otherConfigs.get("dfsNamenodeRpcAddress3") + String dfsNameservicesPort=context.config.otherConfigs.get("dfsNameservicesPort") + String hadoopSecurityAuthentication 
=context.config.otherConfigs.get("hadoopSecurityAuthentication") + String hadoopKerberosKeytabPath =context.config.otherConfigs.get("hadoopKerberosKeytabPath") + String hadoopKerberosPrincipal =context.config.otherConfigs.get("hadoopKerberosPrincipal") def table_export_name = "test_export_max_file_size" @@ -76,10 +81,19 @@ suite("test_export_max_file_size", "p2") { sql """ insert into ${table_export_name} select * from hdfs( - "uri" = "hdfs://${nameNodeHost}:${hdfsPort}${load_data_path}", - "hadoop.username" = "${user_name}", - "column_separator" = ",", - "format" = "csv"); + "uri" = "hdfs://${dfsNameservices}${load_data_path}", + "format" = "csv", + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" + ); """ @@ -104,17 +118,24 @@ suite("test_export_max_file_size", "p2") { def uuid = UUID.randomUUID().toString() // exec export sql """ - EXPORT TABLE ${table_export_name} TO "${fs}${outFilePath}" + EXPORT TABLE ${table_export_name} TO "hdfs://${dfsNameservices}${outFilePath}" PROPERTIES( "label" = "${uuid}", "format" = "${format}", - "column_separator"=",", "max_file_size" = "5MB", "delete_existing_files"="${isDelete}" ) with HDFS ( - "fs.defaultFS"="${fs}", - "hadoop.username" = "${user_name}" + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" ); """ @@ -129,16 +150,26 @@ suite("test_export_max_file_size", "p2") { sql """ insert into ${table_load_name} select * from hdfs( - "uri" = "${outfile_url}${j}.csv", - "hadoop.username" = "${user_name}", - "column_separator" = ",", - "format" = "csv"); + "uri" = "${outfile_url}${j}.csv", + "format" = "csv", + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 
'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" + ); """ } } // begin test test_export('csv', 'csv', true); - order_qt_select """ select * from ${table_load_name} order by user_id limit 1000""" + order_qt_select """ select * from ${table_load_name} order by user_id limit 1000 """ + order_qt_select_cnt """ select count(*) from ${table_load_name} """ } diff --git a/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy b/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy index 8fbd923b516b65..1c8b3660ed52c1 100644 --- a/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy +++ b/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy @@ -21,10 +21,15 @@ suite("test_outfile_orc_max_file_size", "p2") { sql """ set enable_fallback_to_original_planner=false """ - String nameNodeHost = context.config.otherConfigs.get("extHiveHmsHost") - String hdfsPort = context.config.otherConfigs.get("extHdfsPort") - String fs = "hdfs://${nameNodeHost}:${hdfsPort}" - String user_name = context.config.otherConfigs.get("extHiveHmsUser") + String dfsNameservices=context.config.otherConfigs.get("dfsNameservices") + String dfsHaNamenodesHdfsCluster=context.config.otherConfigs.get("dfsHaNamenodesHdfsCluster") + String dfsNamenodeRpcAddress1=context.config.otherConfigs.get("dfsNamenodeRpcAddress1") + String dfsNamenodeRpcAddress2=context.config.otherConfigs.get("dfsNamenodeRpcAddress2") + String dfsNamenodeRpcAddress3=context.config.otherConfigs.get("dfsNamenodeRpcAddress3") + String dfsNameservicesPort=context.config.otherConfigs.get("dfsNameservicesPort") + String hadoopSecurityAuthentication =context.config.otherConfigs.get("hadoopSecurityAuthentication") + String hadoopKerberosKeytabPath =context.config.otherConfigs.get("hadoopKerberosKeytabPath") + String hadoopKerberosPrincipal =context.config.otherConfigs.get("hadoopKerberosPrincipal") // the path used to load data def load_data_path = "/user/export_test/test_orc_max_file_size.orc" @@ -62,25 +67,45 @@ suite("test_outfile_orc_max_file_size", "p2") { sql """ insert into ${table_export_name} select * from hdfs( - "uri" = "hdfs://${nameNodeHost}:${hdfsPort}${load_data_path}", - "hadoop.username" = "${user_name}", - "format" = "orc"); + "uri" = "hdfs://${dfsNameservices}${load_data_path}", + "format" = "orc", + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" + ); """ def test_outfile_orc_success = {maxFileSize, isDelete, fileNumber, totalRows -> - table = sql """ + def table = sql """ select * from ${table_export_name} - into outfile "${fs}${outFilePath}" + into outfile 
"hdfs://${dfsNameservices}${outFilePath}" FORMAT AS ORC PROPERTIES( - "fs.defaultFS"="${fs}", - "hadoop.username" = "${user_name}", "max_file_size" = "${maxFileSize}", - "delete_existing_files"="${isDelete}" + "delete_existing_files"="${isDelete}", + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" ); """ - assertTrue(table.size() == 1) - assertTrue(table[0].size == 4) + + log.info("table = " + table); + // assertTrue(table.size() == 1) + // assertTrue(table[0].size() == 4) log.info("outfile result = " + table[0]) assertEquals(table[0][0], fileNumber) assertEquals(table[0][1], totalRows) @@ -90,13 +115,21 @@ suite("test_outfile_orc_max_file_size", "p2") { test { sql """ select * from ${table_export_name} - into outfile "${fs}${outFilePath}" + into outfile "hdfs://${dfsNameservices}${outFilePath}" FORMAT AS ORC PROPERTIES( - "fs.defaultFS"="${fs}", - "hadoop.username" = "${user_name}", "max_file_size" = "${maxFileSize}", - "delete_existing_files"="${isDelete}" + "delete_existing_files"="${isDelete}", + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" ); """ diff --git a/regression-test/suites/export_p2/test_outfile_p2.groovy b/regression-test/suites/export_p2/test_outfile_p2.groovy index 429b7a88f7c4c1..5472273b3eb28b 100644 --- a/regression-test/suites/export_p2/test_outfile_p2.groovy +++ b/regression-test/suites/export_p2/test_outfile_p2.groovy @@ -15,11 +15,18 @@ // specific language governing permissions and limitations // under the License. 
-suite("test_outfile_hdfs", "p2") { - String nameNodeHost = context.config.otherConfigs.get("extHiveHmsHost") - String hdfsPort = context.config.otherConfigs.get("extHdfsPort") - String fs = "hdfs://${nameNodeHost}:${hdfsPort}" - String user_name = "hadoop" +suite("test_outfile_p2", "p2") { + + String dfsNameservices=context.config.otherConfigs.get("dfsNameservices") + String dfsHaNamenodesHdfsCluster=context.config.otherConfigs.get("dfsHaNamenodesHdfsCluster") + String dfsNamenodeRpcAddress1=context.config.otherConfigs.get("dfsNamenodeRpcAddress1") + String dfsNamenodeRpcAddress2=context.config.otherConfigs.get("dfsNamenodeRpcAddress2") + String dfsNamenodeRpcAddress3=context.config.otherConfigs.get("dfsNamenodeRpcAddress3") + String dfsNameservicesPort=context.config.otherConfigs.get("dfsNameservicesPort") + String hadoopSecurityAuthentication =context.config.otherConfigs.get("hadoopSecurityAuthentication") + String hadoopKerberosKeytabPath =context.config.otherConfigs.get("hadoopKerberosKeytabPath") + String hadoopKerberosPrincipal =context.config.otherConfigs.get("hadoopKerberosPrincipal") + def table_outfile_name = "test_outfile_hdfs" // create table and insert @@ -38,11 +45,23 @@ suite("test_outfile_hdfs", "p2") { // use a simple sql to make sure there is only one fragment // #21343 - sql """select * from ${table_outfile_name} INTO OUTFILE '${fs}/user/outfile_test/' - FORMAT AS PARQUET PROPERTIES + sql """ + SELECT * FROM ${table_outfile_name} + INTO OUTFILE "hdfs://${dfsNameservices}/user/outfile_test/" + FORMAT AS parquet + PROPERTIES ( - 'hadoop.username' = '${user_name}', - 'fs.defaultFS'='${fs}' + "dfs.data.transfer.protection" = "integrity", + 'dfs.nameservices'="${dfsNameservices}", + 'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}", + 'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}", + 'hadoop.security.authentication'="${hadoopSecurityAuthentication}", + 'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}", + 'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}", + 'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" ); """ + } diff --git a/regression-test/suites/external_table_p0/hive/test_hms_event_notification.groovy b/regression-test/suites/external_table_p0/hive/test_hms_event_notification.groovy index 52724b807d38c7..a3fa4750225d03 100644 --- a/regression-test/suites/external_table_p0/hive/test_hms_event_notification.groovy +++ b/regression-test/suites/external_table_p0/hive/test_hms_event_notification.groovy @@ -22,7 +22,7 @@ suite("test_hms_event_notification", "p0,external,hive,external_docker,external_ return; } for (String useMetaCache : ["true","false"] ) { - for (String hivePrefix : [ "hive2","hive3"]) { + for (String hivePrefix : ["hive3"]) { try { setHivePrefix(hivePrefix) hive_docker """ set hive.stats.autogather=false; """ diff --git a/regression-test/suites/external_table_p0/hive/test_hms_event_notification_multi_catalog.groovy b/regression-test/suites/external_table_p0/hive/test_hms_event_notification_multi_catalog.groovy index 24c2ac3b7fb907..e89475f043dcbf 100644 --- a/regression-test/suites/external_table_p0/hive/test_hms_event_notification_multi_catalog.groovy +++ 
b/regression-test/suites/external_table_p0/hive/test_hms_event_notification_multi_catalog.groovy @@ -24,7 +24,7 @@ suite("test_hms_event_notification_multi_catalog", "p0,external,hive,external_do for (String useMetaCache : ["true","false"] ) { - for (String hivePrefix : [ "hive2","hive3"]) { + for (String hivePrefix : ["hive3"]) { try { setHivePrefix(hivePrefix) hive_docker """ set hive.stats.autogather=false; """ diff --git a/regression-test/suites/external_table_p0/iceberg/write/test_iceberg_create_table.groovy b/regression-test/suites/external_table_p0/iceberg/write/test_iceberg_create_table.groovy index d76c6a4b05260a..22364b8bc96c07 100644 --- a/regression-test/suites/external_table_p0/iceberg/write/test_iceberg_create_table.groovy +++ b/regression-test/suites/external_table_p0/iceberg/write/test_iceberg_create_table.groovy @@ -22,55 +22,66 @@ suite("test_iceberg_create_table", "p0,external,doris,external_docker,external_d return } - String rest_port = context.config.otherConfigs.get("iceberg_rest_uri_port") - String minio_port = context.config.otherConfigs.get("iceberg_minio_port") - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String catalog_name = "test_iceberg_create_table" + String[][] ret = sql"""show variables like 'enable_fallback_to_original_planner';""" + boolean need_set_false = ret.size() == 1 && ret[0][1] == "true" + if (need_set_false) { + sql """ set enable_fallback_to_original_planner=false; """ + } - sql """drop catalog if exists ${catalog_name}""" - sql """ - CREATE CATALOG ${catalog_name} PROPERTIES ( - 'type'='iceberg', - 'iceberg.catalog.type'='rest', - 'uri' = 'http://${externalEnvIp}:${rest_port}', - "s3.access_key" = "admin", - "s3.secret_key" = "password", - "s3.endpoint" = "http://${externalEnvIp}:${minio_port}", - "s3.region" = "us-east-1" - );""" + try { + String rest_port = context.config.otherConfigs.get("iceberg_rest_uri_port") + String minio_port = context.config.otherConfigs.get("iceberg_minio_port") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + String catalog_name = "test_iceberg_create_table" - sql """ switch ${catalog_name} """ + sql """drop catalog if exists ${catalog_name}""" + sql """ + CREATE CATALOG ${catalog_name} PROPERTIES ( + 'type'='iceberg', + 'iceberg.catalog.type'='rest', + 'uri' = 'http://${externalEnvIp}:${rest_port}', + "s3.access_key" = "admin", + "s3.secret_key" = "password", + "s3.endpoint" = "http://${externalEnvIp}:${minio_port}", + "s3.region" = "us-east-1" + );""" - String db1 = catalog_name + "_db1" - String tb1 = db1 + "_tb1" - String tb2 = db1 + "_tb2" + sql """ switch ${catalog_name} """ - sql """ drop table if exists ${db1}.${tb1} """ - sql """ drop table if exists ${db1}.${tb2} """ - sql """ drop database if exists ${db1} """ + String db1 = catalog_name + "_db1" + String tb1 = db1 + "_tb1" + String tb2 = db1 + "_tb2" - sql """ create database ${db1} """ + sql """ drop table if exists ${db1}.${tb1} """ + sql """ drop table if exists ${db1}.${tb2} """ + sql """ drop database if exists ${db1} """ - test { - sql """ create table ${db1}.${tb1} (id int) engine = olap """ - exception "Cannot create olap table out of internal catalog. Make sure 'engine' type is specified when use the catalog: ${catalog_name}" - } + sql """ create database ${db1} """ - test { - sql """ create table ${db1}.${tb1} (id int) engine = hive """ - exception "java.sql.SQLException: errCode = 2, detailMessage = Iceberg type catalog can only use `iceberg` engine." 
- } + test { + sql """ create table ${db1}.${tb1} (id int) engine = olap """ + exception "Cannot create olap table out of internal catalog. Make sure 'engine' type is specified when use the catalog: ${catalog_name}" + } - test { - sql """ create table ${db1}.${tb1} (id int) engine = jdbc """ - exception "java.sql.SQLException: errCode = 2, detailMessage = Iceberg type catalog can only use `iceberg` engine." - } + test { + sql """ create table ${db1}.${tb1} (id int) engine = hive """ + exception "java.sql.SQLException: errCode = 2, detailMessage = Iceberg type catalog can only use `iceberg` engine." + } - sql """ create table ${db1}.${tb1} (id int) engine = iceberg """ - sql """ create table ${db1}.${tb2} (id int) """ + test { + sql """ create table ${db1}.${tb1} (id int) engine = jdbc """ + exception "java.sql.SQLException: errCode = 2, detailMessage = Iceberg type catalog can only use `iceberg` engine." + } - sql """ drop table ${db1}.${tb1} """ - sql """ drop table ${db1}.${tb2} """ - sql """ drop database ${db1} """ + sql """ create table ${db1}.${tb1} (id int) engine = iceberg """ + sql """ create table ${db1}.${tb2} (id int) """ + sql """ drop table ${db1}.${tb1} """ + sql """ drop table ${db1}.${tb2} """ + sql """ drop database ${db1} """ + } finally { + if (need_set_false) { + sql """ set enable_fallback_to_original_planner=true; """ + } + } } diff --git a/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_mysql.groovy b/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_mysql.groovy index eee9e50d65de6f..1ba4afc52cc59e 100644 --- a/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_mysql.groovy +++ b/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_mysql.groovy @@ -887,7 +887,7 @@ suite("test_jdbc_query_mysql", "p0,external,mysql,external_docker,external_docke order_qt_sql84 """ SELECT NULL, NULL INTERSECT SELECT NULL, NULL FROM $jdbcMysql57Table1 """ order_qt_sql85 """ SELECT COUNT(*) FROM $jdbcMysql57Table1 INTERSECT SELECT COUNT(k8) FROM $jdbcMysql57Table1 HAVING SUM(k7) IS NOT NULL """ order_qt_sql86 """ SELECT k8 FROM $jdbcMysql57Table1 WHERE k8 < 7 EXCEPT SELECT k8 FROM $jdbcMysql57Table1 WHERE k8 > 21 """ - order_qt_sql87 """ SELECT row_number() OVER (PARTITION BY k7) rn, k8 FROM $jdbcMysql57Table1 LIMIT 3 """ + order_qt_sql87 """ SELECT row_number() OVER (PARTITION BY k7 ORDER BY k8) rn, k8 FROM $jdbcMysql57Table1 LIMIT 3 """ order_qt_sql88 """ SELECT row_number() OVER (PARTITION BY k7 ORDER BY k8) rn FROM $jdbcMysql57Table1 LIMIT 3 """ order_qt_sql89 """ SELECT row_number() OVER (ORDER BY k8) rn FROM $jdbcMysql57Table1 LIMIT 3 """ order_qt_sql90 """ SELECT row_number() OVER () FROM $jdbcMysql57Table1 as a JOIN ${exMysqlTable} as b ON a.k8 = b.id WHERE a.k8 > 111 LIMIT 2 """ diff --git a/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy b/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy index 492fdeb349be02..623bd6e8932105 100644 --- a/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy +++ b/regression-test/suites/external_table_p0/jdbc/test_jdbc_query_pg.groovy @@ -575,7 +575,7 @@ suite("test_jdbc_query_pg", "p0,external,pg,external_docker,external_docker_pg") order_qt_sql84 """ SELECT NULL, NULL INTERSECT SELECT NULL, NULL FROM $jdbcPg14Table1 """ order_qt_sql85 """ SELECT COUNT(*) FROM $jdbcPg14Table1 INTERSECT SELECT COUNT(k8) FROM $jdbcPg14Table1 HAVING SUM(k7) IS NOT NULL """ order_qt_sql86 """ SELECT k8 FROM $jdbcPg14Table1 WHERE k8 < 7 EXCEPT SELECT k8 
FROM $jdbcPg14Table1 WHERE k8 > 21 """ - order_qt_sql87 """ SELECT row_number() OVER (PARTITION BY k7) rn, k8 FROM $jdbcPg14Table1 LIMIT 3 """ + order_qt_sql87 """ SELECT row_number() OVER (PARTITION BY k7 order by k8) rn, k8 FROM $jdbcPg14Table1 LIMIT 3 """ order_qt_sql88 """ SELECT row_number() OVER (PARTITION BY k7 ORDER BY k8) rn FROM $jdbcPg14Table1 LIMIT 3 """ order_qt_sql89 """ SELECT row_number() OVER (ORDER BY k8) rn FROM $jdbcPg14Table1 LIMIT 3 """ order_qt_sql90 """ SELECT row_number() OVER () FROM $jdbcPg14Table1 as a JOIN ${dorisExTable1} as b ON a.k8 = b.id WHERE a.k8 > 111 LIMIT 2 """ diff --git a/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy b/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy index 0be7d0ad396c3b..388287c9b8a1ad 100644 --- a/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy +++ b/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy @@ -628,15 +628,6 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc qt_sql """desc ctas_partition_text_2""" // ctas logic is different between new and old planner. // so need to test both. - // the old planner's test can be removed once the old planner is removed. - sql """set enable_nereids_planner=false""" - // 1. test text type column as distribution col - sql """create table ctas_partition_text_3 distributed by hash(text) buckets 1 properties("replication_num" = "1") as select int_u, text, text as t2 from mysql_conjuncts.doris_test.all_types;""" - qt_sql """desc ctas_partition_text_3""" - // 2. test varchar type column as first col - sql """create table ctas_partition_text_4 distributed by hash(int_u) buckets 1 properties("replication_num" = "1") as select varchar, int_u from mysql_conjuncts.doris_test.all_types;""" - qt_sql """desc ctas_partition_text_4""" - sql """drop catalog if exists mysql_conjuncts;""" } } diff --git a/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy b/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy index 7a0864923f51cb..dfe5532fefdb44 100644 --- a/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy +++ b/regression-test/suites/external_table_p0/kerberos/test_single_hive_kerberos.groovy @@ -29,6 +29,10 @@ suite("test_single_hive_kerberos", "p0,external,kerberos,external_docker,externa "hadoop.security.authentication" = "kerberos", "hadoop.kerberos.principal"="presto-server/presto-master.docker.cluster@LABS.TERADATA.COM", "hadoop.kerberos.keytab" = "/keytabs/presto-server.keytab", + "hadoop.security.auth_to_local" = "RULE:[2:\$1@\$0](.*@LABS.TERADATA.COM)s/@.*// + RULE:[2:\$1@\$0](.*@OTHERLABS.TERADATA.COM)s/@.*// + RULE:[2:\$1@\$0](.*@OTHERREALM.COM)s/@.*// + DEFAULT", "hive.metastore.sasl.enabled " = "true", "hive.metastore.kerberos.principal" = "hive/_HOST@LABS.TERADATA.COM" ); @@ -57,7 +61,7 @@ suite("test_single_hive_kerberos", "p0,external,kerberos,external_docker,externa logger.info(e.toString()) // caused by a warning msg if enable sasl on hive but "hive.metastore.sasl.enabled" is not true: // "set_ugi() not successful, Likely cause: new client talking to old server. Continuing without it." 
- assertTrue(e.toString().contains("org.apache.thrift.transport.TTransportException: null")) + assertTrue(e.toString().contains("thrift.transport.TTransportException")) } try { diff --git a/regression-test/suites/external_table_p0/paimon/paimon_tb_mix_format.groovy b/regression-test/suites/external_table_p0/paimon/paimon_tb_mix_format.groovy new file mode 100644 index 00000000000000..c8569f5c03fa68 --- /dev/null +++ b/regression-test/suites/external_table_p0/paimon/paimon_tb_mix_format.groovy @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("paimon_tb_mix_format", "p0,external,doris,external_docker,external_docker_doris") { + + logger.info("start paimon test") + String enabled = context.config.otherConfigs.get("enablePaimonTest") + if (enabled == null || !enabled.equalsIgnoreCase("true")) { + logger.info("disabled paimon test") + return + } + + try { + String catalog_name = "paimon_tb_mix_format" + String minio_port = context.config.otherConfigs.get("iceberg_minio_port") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + + sql """drop catalog if exists ${catalog_name}""" + sql """CREATE CATALOG ${catalog_name} PROPERTIES ( + 'type'='paimon', + 'warehouse' = 's3://warehouse/wh/', + "s3.access_key" = "admin", + "s3.secret_key" = "password", + "s3.endpoint" = "http://${externalEnvIp}:${minio_port}", + "s3.region" = "us-east-1" + );""" + + logger.info("catalog " + catalog_name + " created") + sql """switch ${catalog_name};""" + logger.info("switched to catalog " + catalog_name) + sql """use test_paimon_spark;""" + + sql """set force_jni_scanner=true""" + qt_order """ select * from test_tb_mix_format order by par,id; """ + + sql """set force_jni_scanner=false""" + qt_order """ select * from test_tb_mix_format order by par,id; """ + } finally { + sql """set force_jni_scanner=false""" + } + +} \ No newline at end of file diff --git a/regression-test/suites/external_table_p0/tvf/test_hdfs_tvf.groovy b/regression-test/suites/external_table_p0/tvf/test_hdfs_tvf.groovy index abdfd871a1f85f..8c4028bfefe021 100644 --- a/regression-test/suites/external_table_p0/tvf/test_hdfs_tvf.groovy +++ b/regression-test/suites/external_table_p0/tvf/test_hdfs_tvf.groovy @@ -312,6 +312,32 @@ suite("test_hdfs_tvf","external,hive,tvf,external_docker") { "column_separator" = ",", "format" = "${format}"); """ + + // test create view from tvf and alter view from tvf + uri = "${defaultFS}" + "/user/doris/preinstalled_data/csv_format_test/all_types.csv" + format = "csv" + sql """ DROP VIEW IF EXISTS test_hdfs_tvf_create_view;""" + sql """ + create view test_hdfs_tvf_create_view as + select * from HDFS( + "uri" = "${uri}", + "hadoop.username" = "${hdfsUserName}", + "column_separator" = ",", + "format" = "${format}") order by c1; 
+ """ + + order_qt_create_view """ select * from test_hdfs_tvf_create_view order by c1 limit 20; """ + + sql """ + alter view test_hdfs_tvf_create_view as + select c1 from HDFS( + "uri" = "${uri}", + "hadoop.username" = "${hdfsUserName}", + "column_separator" = ",", + "format" = "${format}") order by c1; + """ + + order_qt_alter_view """ select * from test_hdfs_tvf_create_view order by c1 limit 20; """ } finally { } } diff --git a/regression-test/suites/external_table_p0/tvf/test_tvf_view_count.groovy b/regression-test/suites/external_table_p0/tvf/test_tvf_view_count.groovy index 415b7cfe354b22..d0d6d80fd5eba2 100644 --- a/regression-test/suites/external_table_p0/tvf/test_tvf_view_count.groovy +++ b/regression-test/suites/external_table_p0/tvf/test_tvf_view_count.groovy @@ -24,7 +24,6 @@ suite("test_tvf_view_count", "p0,external,tvf,external_docker,hive") { sql """drop database if exists test_tvf_view_count_p2""" sql """create database test_tvf_view_count_p2""" sql """use test_tvf_view_count_p2""" - sql """set enable_nereids_planner=false""" sql """create view tvf_view_count as select * from hdfs ( "uri"="hdfs://${nameNodeHost}:${hdfsPort}/user/doris/tpch1.db/tpch1_parquet/part/part-00000-cb9099f7-a053-4f9a-80af-c659cfa947cc-c000.snappy.parquet", "hadoop.username" = "hadoop", diff --git a/regression-test/suites/external_table_p0/tvf/upgrade/load.groovy b/regression-test/suites/external_table_p0/tvf/upgrade/load.groovy new file mode 100644 index 00000000000000..5fac50e64ffa81 --- /dev/null +++ b/regression-test/suites/external_table_p0/tvf/upgrade/load.groovy @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_tvf_upgrade_load", "p0,external,hive,external_docker,external_docker_hive,restart_fe,upgrade_case") { + String hdfs_port = context.config.otherConfigs.get("hive2HdfsPort") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + + // It's okay to use random `hdfsUser`, but can not be empty. 
+ def hdfsUserName = "doris" + def format = "csv" + def defaultFS = "hdfs://${externalEnvIp}:${hdfs_port}" + def uri = "" + + String enabled = context.config.otherConfigs.get("enableHiveTest") + if (enabled != null && enabled.equalsIgnoreCase("true")) { + // test create view from tvf and alter view from tvf + uri = "${defaultFS}" + "/user/doris/preinstalled_data/csv_format_test/all_types.csv" + format = "csv" + sql """ DROP VIEW IF EXISTS test_hdfs_tvf_create_view;""" + sql """ + create view test_hdfs_tvf_create_view as + select * from HDFS( + "uri" = "${uri}", + "hadoop.username" = "${hdfsUserName}", + "column_separator" = ",", + "format" = "${format}") order by c1; + """ + logger.info("View test_hdfs_tvf_create_view created") + + + sql """ + alter view test_hdfs_tvf_create_view as + select c1 from HDFS( + "uri" = "${uri}", + "hadoop.username" = "${hdfsUserName}", + "column_separator" = ",", + "format" = "${format}") order by c1; + """ + logger.info("View test_hdfs_tvf_create_view altered") + } +} \ No newline at end of file diff --git a/regression-test/suites/external_table_p0/tvf/upgrade/test.groovy b/regression-test/suites/external_table_p0/tvf/upgrade/test.groovy new file mode 100644 index 00000000000000..a3b0795ef46f98 --- /dev/null +++ b/regression-test/suites/external_table_p0/tvf/upgrade/test.groovy @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_tvf_upgrade_test", "p0,external,hive,external_docker,external_docker_hive,restart_fe,upgrade_case") { + String hdfs_port = context.config.otherConfigs.get("hive2HdfsPort") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + + // It's okay to use random `hdfsUser`, but can not be empty. + def hdfsUserName = "doris" + def format = "csv" + def defaultFS = "hdfs://${externalEnvIp}:${hdfs_port}" + def uri = "" + + String enabled = context.config.otherConfigs.get("enableHiveTest") + if (enabled != null && enabled.equalsIgnoreCase("true")) { + order_qt_create_view """ select * from test_hdfs_tvf_create_view order by c1 limit 20; """ + + order_qt_alter_view """ select * from test_hdfs_tvf_create_view order by c1 limit 20; """ + } +} \ No newline at end of file diff --git a/regression-test/suites/external_table_p2/es/test_external_catalog_es.groovy b/regression-test/suites/external_table_p2/es/test_external_catalog_es.groovy index 19ea7d675db75d..5412bc736c7956 100644 --- a/regression-test/suites/external_table_p2/es/test_external_catalog_es.groovy +++ b/regression-test/suites/external_table_p2/es/test_external_catalog_es.groovy @@ -16,6 +16,11 @@ // under the License. 
//import org.postgresql.Driver suite("test_external_catalog_es", "p2,external,es,external_remote,external_remote_es") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableExternalEsTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { diff --git a/regression-test/suites/external_table_p2/hive/test_cloud_accessible_obs.groovy b/regression-test/suites/external_table_p2/hive/test_cloud_accessible_obs.groovy index b475088bd5615b..c70bb08c02e8ff 100644 --- a/regression-test/suites/external_table_p2/hive/test_cloud_accessible_obs.groovy +++ b/regression-test/suites/external_table_p2/hive/test_cloud_accessible_obs.groovy @@ -16,6 +16,12 @@ // under the License. suite("test_cloud_accessible_obs", "p2,external,hive,external_remote,external_remote_hive") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableObjStorageTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_cloud_accessible_oss.groovy b/regression-test/suites/external_table_p2/hive/test_cloud_accessible_oss.groovy index d7319a7ed087b0..d43ecaebaf96fa 100644 --- a/regression-test/suites/external_table_p2/hive/test_cloud_accessible_oss.groovy +++ b/regression-test/suites/external_table_p2/hive/test_cloud_accessible_oss.groovy @@ -16,6 +16,13 @@ // under the License. suite("test_cloud_accessible_oss", "p2,external,hive,external_remote,external_remote_hive") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableObjStorageTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_external_brown.groovy b/regression-test/suites/external_table_p2/hive/test_external_brown.groovy index 5ac400be648245..02cc1c1ff10549 100644 --- a/regression-test/suites/external_table_p2/hive/test_external_brown.groovy +++ b/regression-test/suites/external_table_p2/hive/test_external_brown.groovy @@ -16,6 +16,11 @@ // under the License. suite("test_external_brown", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } def formats = ["_parquet", "_orc", ""] def CPUNetworkUtilization_order = """ diff --git a/regression-test/suites/external_table_p2/hive/test_external_catalog_glue_table.groovy b/regression-test/suites/external_table_p2/hive/test_external_catalog_glue_table.groovy index 5b3edd44c78905..ecb37208cf256e 100644 --- a/regression-test/suites/external_table_p2/hive/test_external_catalog_glue_table.groovy +++ b/regression-test/suites/external_table_p2/hive/test_external_catalog_glue_table.groovy @@ -16,6 +16,13 @@ // under the License. 
suite("test_external_catalog_glue_table", "p2,external,hive,external_remote,external_remote_hive") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_external_github.groovy b/regression-test/suites/external_table_p2/hive/test_external_github.groovy index edd8e9436c2aa5..2d456b9845543f 100644 --- a/regression-test/suites/external_table_p2/hive/test_external_github.groovy +++ b/regression-test/suites/external_table_p2/hive/test_external_github.groovy @@ -16,6 +16,11 @@ // under the License. suite("test_external_github", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } def formats = ["_parquet", "_orc"] diff --git a/regression-test/suites/external_table_p2/hive/test_external_yandex.groovy b/regression-test/suites/external_table_p2/hive/test_external_yandex.groovy deleted file mode 100644 index 05f635945f1342..00000000000000 --- a/regression-test/suites/external_table_p2/hive/test_external_yandex.groovy +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_external_yandex", "p2,external,hive,external_remote,external_remote_hive") { - - def formats = ["_parquet", "_orc", ""] - def duplicateAggregationKeys = "SELECT URL, EventDate, max(URL) FROM hitsSUFFIX WHERE CounterID = 1704509 AND UserID = 4322253409885123546 GROUP BY URL, EventDate, EventDate ORDER BY URL, EventDate;" - def like1 = """SELECT count() FROM hitsSUFFIX WHERE URL LIKE '%/avtomobili_s_probegom/_%__%__%__%';""" - def like2 = """SELECT count() FROM hitsSUFFIX WHERE URL LIKE '/avtomobili_s_probegom/_%__%__%__%';""" - def like3 = """SELECT count() FROM hitsSUFFIX WHERE URL LIKE '%_/avtomobili_s_probegom/_%__%__%__%';""" - def like4 = """SELECT count() FROM hitsSUFFIX WHERE URL LIKE '%avtomobili%';""" - def loyalty = """SELECT loyalty, count() AS c - FROM - ( - SELECT UserID, CAST(((if(yandex > google, yandex / (yandex + google), 0 - google / (yandex + google))) * 10) AS TINYINT) AS loyalty - FROM - ( - SELECT UserID, sum(if(SearchEngineID = 2, 1, 0)) AS yandex, sum(if(SearchEngineID = 3, 1, 0)) AS google - FROM hitsSUFFIX - WHERE SearchEngineID = 2 OR SearchEngineID = 3 GROUP BY UserID HAVING yandex + google > 10 - ) t1 - ) t2 - GROUP BY loyalty - ORDER BY loyalty;""" - def maxStringIf = """SELECT CounterID, count(), max(if(SearchPhrase != "", SearchPhrase, "")) FROM hitsSUFFIX GROUP BY CounterID ORDER BY count() DESC LIMIT 20;""" - def minMax = """SELECT CounterID, min(WatchID), max(WatchID) FROM hitsSUFFIX GROUP BY CounterID ORDER BY count() DESC LIMIT 20;""" - def monotonicEvaluationSegfault = """SELECT max(0) FROM visitsSUFFIX WHERE (CAST(CAST(StartDate AS DATETIME) AS INT)) > 1000000000;""" - def subqueryInWhere = """SELECT count() FROM hitsSUFFIX WHERE UserID IN (SELECT UserID FROM hitsSUFFIX WHERE CounterID = 800784);""" - def where01 = """SELECT CounterID, count(distinct UserID) FROM hitsSUFFIX WHERE 0 != 0 GROUP BY CounterID;""" - def where02 = """SELECT CounterID, count(distinct UserID) FROM hitsSUFFIX WHERE CAST(0 AS BOOLEAN) AND CounterID = 1704509 GROUP BY CounterID;""" - - String enabled = context.config.otherConfigs.get("enableExternalHiveTest") - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") - String extHiveHmsPort = context.config.otherConfigs.get("extHiveHmsPort") - String catalog_name = "external_yandex" - - sql """drop catalog if exists ${catalog_name};""" - - sql """ - create catalog if not exists ${catalog_name} properties ( - 'type'='hms', - 'hive.metastore.uris' = 'thrift://${extHiveHmsHost}:${extHiveHmsPort}' - ); - """ - logger.info("catalog " + catalog_name + " created") - - sql """switch ${catalog_name};""" - - logger.info("switched to catalog " + catalog_name) - - sql """use multi_catalog;""" - - logger.info("use multi_catalog") - - for (String format in formats) { - logger.info("Process format " + format) - qt_01 duplicateAggregationKeys.replace("SUFFIX", format) - qt_02 like1.replace("SUFFIX", format) - qt_03 like2.replace("SUFFIX", format) - qt_04 like3.replace("SUFFIX", format) - qt_05 like4.replace("SUFFIX", format) - qt_06 loyalty.replace("SUFFIX", format) - qt_07 maxStringIf.replace("SUFFIX", format) - qt_08 minMax.replace("SUFFIX", format) - qt_09 monotonicEvaluationSegfault.replace("SUFFIX", format) - qt_10 subqueryInWhere.replace("SUFFIX", format) - qt_11 where01.replace("SUFFIX", format) - qt_12 where02.replace("SUFFIX", format) - } - } -} - diff --git a/regression-test/suites/external_table_p2/hive/test_external_yandex_nereids.groovy 
b/regression-test/suites/external_table_p2/hive/test_external_yandex_nereids.groovy index 9874f2cad3eed4..9cd511bf98a1e5 100644 --- a/regression-test/suites/external_table_p2/hive/test_external_yandex_nereids.groovy +++ b/regression-test/suites/external_table_p2/hive/test_external_yandex_nereids.groovy @@ -16,6 +16,11 @@ // under the License. suite("test_external_yandex_nereids", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } def formats = ["_parquet"] def duplicateAggregationKeys = "SELECT URL, EventDate, max(URL) FROM hitsSUFFIX WHERE CounterID = 1704509 AND UserID = 4322253409885123546 GROUP BY URL, EventDate, EventDate ORDER BY URL, EventDate;" diff --git a/regression-test/suites/external_table_p2/hive/test_hive_hudi.groovy b/regression-test/suites/external_table_p2/hive/test_hive_hudi.groovy index 1c69c5a738d826..d98f176e64b752 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_hudi.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_hudi.groovy @@ -16,6 +16,12 @@ // under the License. suite("test_hive_hudi", "p2,external,hive,hudi") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_hive_hudi_statistics.groovy b/regression-test/suites/external_table_p2/hive/test_hive_hudi_statistics.groovy index 55e5037de458c7..a7beff6022772f 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_hudi_statistics.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_hudi_statistics.groovy @@ -16,6 +16,12 @@ // under the License. suite("test_hive_hudi_statistics", "p2,external,hive,hudi") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_hive_partition_statistic.groovy b/regression-test/suites/external_table_p2/hive/test_hive_partition_statistic.groovy index 6f398a62e3cd83..c3cde4d7962775 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_partition_statistic.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_partition_statistic.groovy @@ -16,6 +16,11 @@ // under the License. 
suite("test_hive_partition_statistic", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { logger.info("This feature has not been supported yet, skip it.") diff --git a/regression-test/suites/external_table_p2/hive/test_hive_statistic_cache.groovy b/regression-test/suites/external_table_p2/hive/test_hive_statistic_cache.groovy index de39d408a53052..f937c97d1e5d25 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_statistic_cache.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_statistic_cache.groovy @@ -17,6 +17,12 @@ suite("test_hive_statistic_cache", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + def wait_row_count_reported = { table, expected -> for (int i = 0; i < 10; i++) { result = sql """show table stats ${table}""" diff --git a/regression-test/suites/external_table_p2/hive/test_hive_statistic_sample.groovy b/regression-test/suites/external_table_p2/hive/test_hive_statistic_sample.groovy index 1c1a72173875ed..0c17f40fe53822 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_statistic_sample.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_statistic_sample.groovy @@ -16,6 +16,11 @@ // under the License. suite("test_hive_statistic_sample", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_hive_write_insert_s3.groovy b/regression-test/suites/external_table_p2/hive/test_hive_write_insert_s3.groovy index cf9fea67cbd685..2c76622201dc8f 100644 --- a/regression-test/suites/external_table_p2/hive/test_hive_write_insert_s3.groovy +++ b/regression-test/suites/external_table_p2/hive/test_hive_write_insert_s3.groovy @@ -16,6 +16,12 @@ // under the License. suite("test_hive_write_insert_s3", "p2,external,hive,external_remote,external_remote_hive") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } def format_compressions = ["parquet_snappy"] def s3BucketName = getS3BucketName() diff --git a/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy b/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy index ea7f5701df8b4f..83b00a4fd98880 100644 --- a/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy +++ b/regression-test/suites/external_table_p2/hive/test_select_count_optimize.groovy @@ -16,6 +16,13 @@ // under the License. 
suite("test_select_count_optimize", "p2,external,hive,external_remote,external_remote_hive") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost") diff --git a/regression-test/suites/external_table_p2/hive/test_upper_case_column_name.groovy b/regression-test/suites/external_table_p2/hive/test_upper_case_column_name.groovy index 0033e0dca2406d..95edbc8bac2d47 100644 --- a/regression-test/suites/external_table_p2/hive/test_upper_case_column_name.groovy +++ b/regression-test/suites/external_table_p2/hive/test_upper_case_column_name.groovy @@ -16,6 +16,12 @@ // under the License. suite("upper_case_column_name", "p2,external,hive,external_remote,external_remote_hive") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } def hiveParquet1 = """select * from hive_upper_case_parquet;""" def hiveParquet2 = """select * from hive_upper_case_parquet where id=1;""" def hiveParquet3 = """select * from hive_upper_case_parquet where id>1;""" diff --git a/regression-test/suites/external_table_p2/hive/test_viewfs_hive.groovy b/regression-test/suites/external_table_p2/hive/test_viewfs_hive.groovy index 9ccea773ed4093..020bcb9ab08352 100644 --- a/regression-test/suites/external_table_p2/hive/test_viewfs_hive.groovy +++ b/regression-test/suites/external_table_p2/hive/test_viewfs_hive.groovy @@ -16,6 +16,11 @@ // under the License. suite("test_viewfs_hive", "p2,external,hive,external_remote,external_remote_hive") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { diff --git a/regression-test/suites/external_table_p2/hudi/test_hudi_timetravel.groovy b/regression-test/suites/external_table_p2/hudi/test_hudi_timetravel.groovy index e8c859698326b3..db535e3517987e 100644 --- a/regression-test/suites/external_table_p2/hudi/test_hudi_timetravel.groovy +++ b/regression-test/suites/external_table_p2/hudi/test_hudi_timetravel.groovy @@ -17,6 +17,12 @@ suite("test_hudi_timetravel", "p2,external,hudi,external_remote,external_remote_hudi") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalHudiTest") if (enabled == null || !enabled.equalsIgnoreCase("true")) { logger.info("disable hudi test") diff --git a/regression-test/suites/external_table_p2/maxcompute/test_external_catalog_maxcompute.groovy b/regression-test/suites/external_table_p2/maxcompute/test_external_catalog_maxcompute.groovy index 7bcd4b5ad4198a..6663a2aa842df3 100644 --- a/regression-test/suites/external_table_p2/maxcompute/test_external_catalog_maxcompute.groovy +++ b/regression-test/suites/external_table_p2/maxcompute/test_external_catalog_maxcompute.groovy @@ -278,7 +278,13 @@ CAST(-7.0 AS DOUBLE), CAST(8.00 AS DECIMAL(5,2)) ); - + drop table mc_test_null; + CREATE TABLE `mc_test_null` ( + `id` int, + `col` int + ); + insert into mc_test_null values (1,1),(2,NULL),(3,NULL),(4,4),(5,NULL),(6,6); + */ suite("test_external_catalog_maxcompute", "p2,external,maxcompute,external_remote,external_remote_maxcompute") { String enabled = context.config.otherConfigs.get("enableMaxComputeTest") @@ 
-353,5 +359,12 @@ suite("test_external_catalog_maxcompute", "p2,external,maxcompute,external_remot order_qt_multi_partition_q8 """ select count(*) from multi_partitions where pt>=3; """ order_qt_multi_partition_q9 """ select city,mnt,gender,finished_time,order_rate,cut_date,create_time,pt, yy, mm, dd from multi_partitions where pt >= 2 and pt < 4 and finished_time is not null; """ order_qt_multi_partition_q10 """ select pt, yy, mm, dd from multi_partitions where pt >= 2 and create_time > '2023-08-03 03:11:00' order by pt, yy, mm, dd; """ + + //test null value + order_qt_null_1 """ select * from mc_test_null; """ + order_qt_null_2 """ select * from mc_test_null where col is not null ; """ + order_qt_null_3 """ select * from mc_test_null where col is null ; """ + + } } diff --git a/regression-test/suites/external_table_p2/mysql/test_external_catalog_mysql.groovy b/regression-test/suites/external_table_p2/mysql/test_external_catalog_mysql.groovy index 9d1ab73e5106b1..febd5c01758db9 100644 --- a/regression-test/suites/external_table_p2/mysql/test_external_catalog_mysql.groovy +++ b/regression-test/suites/external_table_p2/mysql/test_external_catalog_mysql.groovy @@ -17,6 +17,12 @@ //import com.mysql.cj.jdbc.Driver suite("test_external_catalog_mysql", "p2,external,mysql,external_remote,external_remote_mysql") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalMysqlTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extMysqlHost = context.config.otherConfigs.get("extMysqlHost") diff --git a/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql.groovy b/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql.groovy index 20b447a3bdd173..81374ae427e130 100644 --- a/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql.groovy +++ b/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql.groovy @@ -17,6 +17,12 @@ //import com.mysql.cj.jdbc.Driver suite("test_external_resource_mysql", "p2,external,mysql,external_remote,external_remote_mysql") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalMysqlTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extMysqlHost = context.config.otherConfigs.get("extMysqlHost") diff --git a/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql_nereids.groovy b/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql_nereids.groovy deleted file mode 100644 index 86b49bd251916b..00000000000000 --- a/regression-test/suites/external_table_p2/mysql/test_external_resource_mysql_nereids.groovy +++ /dev/null @@ -1,156 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -//import com.mysql.cj.jdbc.Driver -suite("test_external_resource_mysql_nereids", "p2,external,mysql,external_remote,external_remote_mysql") { - - String enabled = context.config.otherConfigs.get("enableExternalMysqlTest") - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String extMysqlHost = context.config.otherConfigs.get("extMysqlHost") - String extMysqlPort = context.config.otherConfigs.get("extMysqlPort") - String extMysqlUser = context.config.otherConfigs.get("extMysqlUser") - String extMysqlPassword = context.config.otherConfigs.get("extMysqlPassword") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-8.0.25.jar" - String mysqlResourceName = "jdbc_resource_mysql_57_n" - String mysqlDatabaseName01 = "external_mysql_database_ssb_n" - String mysqlTableNameLineOrder = "external_mysql_table_lineorder_n" - String mysqlTableNameCustomer = "external_mysql_table_customer_n" - String mysqlTableNameSupplier = "external_mysql_table_supplier_n" - - - - sql """set enable_nereids_planner=true;""" - sql """set enable_fallback_to_original_planner=false;""" - sql """drop database if exists ${mysqlDatabaseName01};""" - sql """create database ${mysqlDatabaseName01};""" - sql """use ${mysqlDatabaseName01};""" - - - sql """drop resource if exists ${mysqlResourceName};""" - sql """ - create external resource ${mysqlResourceName} - properties ( - "type"="jdbc", - "user"="${extMysqlUser}", - "password"="${extMysqlPassword}", - "jdbc_url"="jdbc:mysql://${extMysqlHost}:${extMysqlPort}/ssb?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true&serverTimezone=Asia/Shanghai&useSSL=false", - "driver_url"="${driver_url}", - "driver_class"="com.mysql.cj.jdbc.Driver" - ); - """ - - sql """drop table if exists ${mysqlTableNameLineOrder}""" - sql """ - CREATE EXTERNAL TABLE ${mysqlTableNameLineOrder} ( - `lo_orderkey` bigint(20) NOT NULL COMMENT "", - `lo_linenumber` bigint(20) NOT NULL COMMENT "", - `lo_custkey` int(11) NOT NULL COMMENT "", - `lo_partkey` int(11) NOT NULL COMMENT "", - `lo_suppkey` int(11) NOT NULL COMMENT "", - `lo_orderdate` int(11) NOT NULL COMMENT "", - `lo_orderpriority` varchar(16) NOT NULL COMMENT "", - `lo_shippriority` int(11) NOT NULL COMMENT "", - `lo_quantity` bigint(20) NOT NULL COMMENT "", - `lo_extendedprice` bigint(20) NOT NULL COMMENT "", - `lo_ordtotalprice` bigint(20) NOT NULL COMMENT "", - `lo_discount` bigint(20) NOT NULL COMMENT "", - `lo_revenue` bigint(20) NOT NULL COMMENT "", - `lo_supplycost` bigint(20) NOT NULL COMMENT "", - `lo_tax` bigint(20) NOT NULL COMMENT "", - `lo_commitdate` bigint(20) NOT NULL COMMENT "", - `lo_shipmode` varchar(11) NOT NULL COMMENT "" - ) ENGINE=JDBC - PROPERTIES ( - "resource" = "${mysqlResourceName}", - "table" = "lineorder", - "table_type"="mysql" - ); - """ - - def res = sql """select * from ${mysqlTableNameLineOrder} limit 10;""" - logger.info("recoding select: " + res.toString()) - - sql """drop table if exists ${mysqlTableNameCustomer}""" - sql """ - CREATE EXTERNAL TABLE 
${mysqlTableNameCustomer} ( - `c_custkey` int(11) DEFAULT NULL, - `c_name` varchar(25) NOT NULL, - `c_address` varchar(40) NOT NULL, - `c_city` varchar(10) NOT NULL, - `c_nation` varchar(15) NOT NULL, - `c_region` varchar(12) NOT NULL, - `c_phone` varchar(15) NOT NULL - ) ENGINE=JDBC - PROPERTIES ( - "resource" = "${mysqlResourceName}", - "table" = "customer", - "table_type"="mysql" - ); - """ - - def res1 = sql """select * from ${mysqlTableNameCustomer} where c_custkey >100 limit 10;""" - logger.info("recoding select: " + res1.toString()) - - def res2 = sql """select * from ${mysqlTableNameCustomer} order by c_custkey desc limit 10;""" - logger.info("recoding select: " + res2.toString()) - -// def res3 = sql """select AVG(lo_discount) from ${mysqlTableNameCustomer} limit 10;""" -// logger.info("recoding select: " + res3.toString()) -// -// def res4 = sql """select MAX(lo_discount) from ${mysqlTableNameCustomer} limit 10;""" -// logger.info("recoding select: " + res4.toString()) - - def res5 = sql """select count(*) from ${mysqlTableNameCustomer};""" - logger.info("recoding select: " + res5.toString()) - - sql """drop table if exists ${mysqlTableNameSupplier}""" - sql """ - CREATE EXTERNAL TABLE ${mysqlTableNameSupplier} ( - `s_suppkey` int(11) DEFAULT NULL, - `s_name` varchar(25) NOT NULL, - `s_address` varchar(25) NOT NULL, - `s_city` varchar(10) NOT NULL, - `s_nation` varchar(15) NOT NULL, - `s_region` varchar(12) NOT NULL, - `s_phone` varchar(15) NOT NULL - ) ENGINE=JDBC - PROPERTIES ( - "resource" = "${mysqlResourceName}", - "table" = "supplier", - "table_type"="mysql" - ); - """ - def res6 = sql """select count(*) from ${mysqlTableNameSupplier};""" - logger.info("recoding select: " + res6.toString()) - - def res7 = sql """select * from ${mysqlTableNameCustomer} a join ${mysqlTableNameSupplier} b on a.c_nation =b.s_nation limit 5;""" - logger.info("recoding select: " + res7.toString()) - - - sql """drop table if exists ${mysqlTableNameLineOrder}""" - sql """drop database if exists ${mysqlDatabaseName01};""" - } -} - - - - - - - - diff --git a/regression-test/suites/external_table_p2/pg/test_external_pg.groovy b/regression-test/suites/external_table_p2/pg/test_external_pg.groovy deleted file mode 100644 index 099f2e243c2ecf..00000000000000 --- a/regression-test/suites/external_table_p2/pg/test_external_pg.groovy +++ /dev/null @@ -1,133 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-//import org.postgresql.Driver -suite("test_external_pg", "p2,external,pg,external_remote,external_remote_pg") { - - String enabled = context.config.otherConfigs.get("enableExternalPgTest") - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String extPgHost = context.config.otherConfigs.get("extPgHost") - String extPgPort = context.config.otherConfigs.get("extPgPort") - String extPgUser = context.config.otherConfigs.get("extPgUser") - String extPgPassword = context.config.otherConfigs.get("extPgPassword") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/postgresql-42.5.0.jar" - String jdbcResourcePg14 = "jdbc_resource_pg_14" - String jdbcPg14Database1 = "jdbc_pg_14_database1" - String pgTableNameLineOrder = "jdbc_pg_14_table1" - String pgTableNameCustomer = "jdbc_pg_14_customer" - String pgTableNameSupplier = "jdbc_pg_14_supplier" - - sql """drop database if exists ${jdbcPg14Database1};""" - sql """drop resource if exists ${jdbcResourcePg14};""" - - sql """drop database if exists ${jdbcPg14Database1};""" - sql """create database ${jdbcPg14Database1};""" - sql """use ${jdbcPg14Database1};""" - sql """ - create external resource ${jdbcResourcePg14} - properties ( - "type"="jdbc", - "user"="${extPgUser}", - "password"="${extPgPassword}", - "jdbc_url"="jdbc:postgresql://${extPgHost}:${extPgPort}/ssb?currentSchema=ssb&useCursorFetch=true", - "driver_url"="${driver_url}", - "driver_class"="org.postgresql.Driver" - ); - """ - - sql """drop table if exists ${pgTableNameLineOrder}""" - sql """ - CREATE EXTERNAL TABLE ${pgTableNameLineOrder} ( - `lo_orderkey` bigint(20) NOT NULL COMMENT "", - `lo_linenumber` bigint(20) NOT NULL COMMENT "", - `lo_custkey` int(11) NOT NULL COMMENT "", - `lo_partkey` int(11) NOT NULL COMMENT "", - `lo_suppkey` int(11) NOT NULL COMMENT "", - `lo_orderdate` int(11) NOT NULL COMMENT "", - `lo_orderpriority` varchar(16) NOT NULL COMMENT "", - `lo_shippriority` int(11) NOT NULL COMMENT "", - `lo_quantity` bigint(20) NOT NULL COMMENT "", - `lo_extendedprice` bigint(20) NOT NULL COMMENT "", - `lo_ordtotalprice` bigint(20) NOT NULL COMMENT "", - `lo_discount` bigint(20) NOT NULL COMMENT "", - `lo_revenue` bigint(20) NOT NULL COMMENT "", - `lo_supplycost` bigint(20) NOT NULL COMMENT "", - `lo_tax` bigint(20) NOT NULL COMMENT "", - `lo_commitdate` bigint(20) NOT NULL COMMENT "", - `lo_shipmode` varchar(11) NOT NULL COMMENT "" - ) ENGINE=JDBC - PROPERTIES ( - "resource" = "${jdbcResourcePg14}", - "table" = "lineorder", - "table_type"="postgresql" - ); - """ - - sql """drop table if exists ${pgTableNameCustomer}""" - sql """ - CREATE EXTERNAL TABLE ${pgTableNameCustomer} ( - `c_custkey` int(11) DEFAULT NULL, - `c_name` varchar(25) NOT NULL, - `c_address` varchar(40) NOT NULL, - `c_city` varchar(10) NOT NULL, - `c_nation` varchar(15) NOT NULL, - `c_region` varchar(12) NOT NULL, - `c_phone` varchar(15) NOT NULL - ) ENGINE=JDBC - PROPERTIES ( - "resource" = "${jdbcResourcePg14}", - "table" = "customer", - "table_type"="postgresql" - ); - """ - - sql """drop table if exists ${pgTableNameSupplier}""" - sql """ - CREATE EXTERNAL TABLE ${pgTableNameSupplier} ( - `s_suppkey` int(11) DEFAULT NULL, - `s_name` varchar(25) NOT NULL, - `s_address` varchar(25) NOT NULL, - `s_city` varchar(10) NOT NULL, - `s_nation` varchar(15) NOT NULL, - `s_region` varchar(12) NOT NULL, - `s_phone` varchar(15) NOT NULL - ) ENGINE=JDBC - PROPERTIES ( - "resource" = "${jdbcResourcePg14}", - 
"table" = "supplier", - "table_type"="postgresql" - ); - """ - - def res = sql """select count(*) from ${pgTableNameCustomer};""" - logger.info("recoding select: " + res.toString()) - - def res1 = sql """select * from ${pgTableNameSupplier} limit 10""" - logger.info("recoding select: " + res1.toString()) - - def res2 = sql """select * from ${pgTableNameSupplier} order by s_suppkey desc limit 10;""" - logger.info("recoding select: " + res2.toString()) - - def res3 = sql """select * from ${pgTableNameSupplier} where s_suppkey>100 limit 10;""" - logger.info("recoding select: " + res3.toString()) - - def res4 = sql """select * from ${pgTableNameCustomer} a join ${pgTableNameSupplier} b on a.c_nation =b.s_nation limit 5;""" - logger.info("recoding select: " + res4.toString()) - - } -} diff --git a/regression-test/suites/external_table_p2/pg/test_external_pg_nereids.groovy b/regression-test/suites/external_table_p2/pg/test_external_pg_nereids.groovy index dc88bd4a22bcb8..d6a4dd70f7377f 100644 --- a/regression-test/suites/external_table_p2/pg/test_external_pg_nereids.groovy +++ b/regression-test/suites/external_table_p2/pg/test_external_pg_nereids.groovy @@ -17,6 +17,12 @@ //import org.postgresql.Driver suite("test_external_pg_nereids", "p2,external,pg,external_remote,external_remote_pg") { + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalPgTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String extPgHost = context.config.otherConfigs.get("extPgHost") diff --git a/regression-test/suites/external_table_p2/tvf/test_iceberg_meta.groovy b/regression-test/suites/external_table_p2/tvf/test_iceberg_meta.groovy index de13c48727af8b..047b4a36fe2622 100644 --- a/regression-test/suites/external_table_p2/tvf/test_iceberg_meta.groovy +++ b/regression-test/suites/external_table_p2/tvf/test_iceberg_meta.groovy @@ -16,6 +16,13 @@ // under the License. suite("test_iceberg_meta", "p2,external,iceberg,external_remote,external_remote_iceberg") { + + Boolean ignoreP2 = true; + if (ignoreP2) { + logger.info("disable p2 test"); + return; + } + String enabled = context.config.otherConfigs.get("enableExternalHiveTest") if (enabled != null && enabled.equalsIgnoreCase("true")) { String iceberg_catalog_name = "test_iceberg_meta_tvf" @@ -48,4 +55,4 @@ suite("test_iceberg_meta", "p2,external,iceberg,external_remote,external_remote_ where snapshot_id = 7235593032487457798; """ } -} \ No newline at end of file +} diff --git a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_broker_load_with_retry.groovy b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_broker_load_with_retry.groovy new file mode 100644 index 00000000000000..035a6307d46e20 --- /dev/null +++ b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_broker_load_with_retry.groovy @@ -0,0 +1,251 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_cloud_mow_broker_load_with_retry", "nonConcurrent") { + if (!isCloudMode()) { + return + } + + def s3BucketName = getS3BucketName() + def s3Endpoint = getS3Endpoint() + def s3Region = getS3Region() + + GetDebugPoint().clearDebugPointsForAllFEs() + GetDebugPoint().clearDebugPointsForAllBEs() + + def customFeConfig = [ + calculate_delete_bitmap_task_timeout_seconds: 2 + ] + + def table = "tbl_basic" + setFeConfigTemporary(customFeConfig) { + + def attributesList = [ + + ] + + /* ========================================================== normal ========================================================== */ + attributesList.add(new LoadAttributes("s3://${s3BucketName}/regression/load/data/basic_data.csv", + "${table}", "LINES TERMINATED BY \"\n\"", "COLUMNS TERMINATED BY \"|\"", "FORMAT AS \"CSV\"", "(k00,k01,k02,k03,k04,k05,k06,k07,k08,k09,k10,k11,k12,k13,k14,k15,k16,k17,k18)", + "", "", "", "", "")) + + attributesList.add(new LoadAttributes("s3://${s3BucketName}/regression/load/data/basic_data.csv", + "${table}", "LINES TERMINATED BY \"\n\"", "COLUMNS TERMINATED BY \"|\"", "FORMAT AS \"CSV\"", "(K00,K01,K02,K03,K04,K05,K06,K07,K08,K09,K10,K11,K12,K13,K14,K15,K16,K17,K18)", + "", "", "", "", "")) + def ak = getS3AK() + def sk = getS3SK() + try { + sql """ DROP TABLE IF EXISTS ${table} """ + sql """ + CREATE TABLE ${table} + ( + k00 INT NOT NULL, + k01 DATE NOT NULL, + k02 BOOLEAN NULL, + k03 TINYINT NULL, + k04 SMALLINT NULL, + k05 INT NULL, + k06 BIGINT NULL, + k07 LARGEINT NULL, + k08 FLOAT NULL, + k09 DOUBLE NULL, + k10 DECIMAL(9,1) NULL, + k11 DECIMALV3(9,1) NULL, + k12 DATETIME NULL, + k13 DATEV2 NULL, + k14 DATETIMEV2 NULL, + k15 CHAR NULL, + k16 VARCHAR NULL, + k17 STRING NULL, + k18 JSON NULL, + kd01 BOOLEAN NOT NULL DEFAULT "TRUE", + kd02 TINYINT NOT NULL DEFAULT "1", + kd03 SMALLINT NOT NULL DEFAULT "2", + kd04 INT NOT NULL DEFAULT "3", + kd05 BIGINT NOT NULL DEFAULT "4", + kd06 LARGEINT NOT NULL DEFAULT "5", + kd07 FLOAT NOT NULL DEFAULT "6.0", + kd08 DOUBLE NOT NULL DEFAULT "7.0", + kd09 DECIMAL NOT NULL DEFAULT "888888888", + kd10 DECIMALV3 NOT NULL DEFAULT "999999999", + kd11 DATE NOT NULL DEFAULT "2023-08-24", + kd12 DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + kd13 DATEV2 NOT NULL DEFAULT "2023-08-24", + kd14 DATETIMEV2 NOT NULL DEFAULT CURRENT_TIMESTAMP, + kd15 CHAR(255) NOT NULL DEFAULT "我能吞下玻璃而不伤身体", + kd16 VARCHAR(300) NOT NULL DEFAULT "我能吞下玻璃而不伤身体", + kd17 STRING NOT NULL DEFAULT "我能吞下玻璃而不伤身体", + kd18 JSON NULL, + + INDEX idx_inverted_k104 (`k05`) USING INVERTED, + INDEX idx_inverted_k110 (`k11`) USING INVERTED, + INDEX idx_inverted_k113 (`k13`) USING INVERTED, + INDEX idx_inverted_k114 (`k14`) USING INVERTED, + INDEX idx_inverted_k117 (`k17`) USING INVERTED PROPERTIES("parser" = "english"), + INDEX idx_ngrambf_k115 (`k15`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + INDEX idx_ngrambf_k116 (`k16`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + INDEX idx_ngrambf_k117 (`k17`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + + INDEX idx_bitmap_k104 (`k02`) USING BITMAP, + INDEX idx_bitmap_k110 
(`kd01`) USING BITMAP + + ) + UNIQUE KEY(k00) + DISTRIBUTED BY HASH(k00) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "bloom_filter_columns"="k05", + "replication_num" = "1" + ); + """ + GetDebugPoint().enableDebugPointForAllBEs("CloudEngineCalcDeleteBitmapTask.execute.enable_wait") + def i = 0 + for (LoadAttributes attributes : attributesList) { + def label = "test_s3_load_" + UUID.randomUUID().toString().replace("-", "_") + "_" + i + attributes.label = label + def prop = attributes.getPropertiesStr() + + def sql_str = """ + LOAD LABEL $label ( + $attributes.dataDesc.mergeType + DATA INFILE("$attributes.dataDesc.path") + INTO TABLE $attributes.dataDesc.tableName + $attributes.dataDesc.columnTermClause + $attributes.dataDesc.lineTermClause + $attributes.dataDesc.formatClause + $attributes.dataDesc.columns + $attributes.dataDesc.columnsFromPathClause + $attributes.dataDesc.columnMappingClause + $attributes.dataDesc.precedingFilterClause + $attributes.dataDesc.orderByClause + $attributes.dataDesc.whereExpr + ) + WITH S3 ( + "AWS_ACCESS_KEY" = "$ak", + "AWS_SECRET_KEY" = "$sk", + "AWS_ENDPOINT" = "${s3Endpoint}", + "AWS_REGION" = "${s3Region}", + "use_path_style" = "$attributes.usePathStyle", + "provider" = "${getS3Provider()}" + ) + ${prop} + """ + logger.info("submit sql: ${sql_str}"); + sql """${sql_str}""" + logger.info("Submit load with label: $label, table: $attributes.dataDesc.tableName, path: $attributes.dataDesc.path") + + def max_try_milli_secs = 600000 + while (max_try_milli_secs > 0) { + String[][] result = sql """ show load where label="$attributes.label" order by createtime desc limit 1; """ + if (result[0][2].equals("FINISHED")) { + if (attributes.isExceptFailed) { + assertTrue(false, "load should have failed but succeeded: $result") + } + logger.info("Load FINISHED " + attributes.label + ": $result") + break + } + if (result[0][2].equals("CANCELLED")) { + if (attributes.isExceptFailed) { + logger.info("Load FINISHED " + attributes.label) + break + } + assertTrue(false, "load failed: $result") + break + } + Thread.sleep(1000) + max_try_milli_secs -= 1000 + if (max_try_milli_secs <= 0) { + assertTrue(false, "load Timeout: $attributes.label") + } + } + qt_select """ select count(*) from $attributes.dataDesc.tableName """ + ++i + } + } finally { + GetDebugPoint().disableDebugPointForAllFEs("CloudEngineCalcDeleteBitmapTask.execute.enable_wait") + sql "DROP TABLE IF EXISTS ${table};" + GetDebugPoint().clearDebugPointsForAllBEs() + } + } + +} + +class DataDesc { + public String mergeType = "" + public String path + public String tableName + public String lineTermClause + public String columnTermClause + public String formatClause + public String columns + public String columnsFromPathClause + public String precedingFilterClause + public String columnMappingClause + public String whereExpr + public String orderByClause +} + +class LoadAttributes { + LoadAttributes(String path, String tableName, String lineTermClause, String columnTermClause, String formatClause, + String columns, String columnsFromPathClause, String precedingFilterClause, String columnMappingClause, String whereExpr, String orderByClause, boolean isExceptFailed = false) { + this.dataDesc = new DataDesc() + this.dataDesc.path = path + this.dataDesc.tableName = tableName + this.dataDesc.lineTermClause = lineTermClause + this.dataDesc.columnTermClause = columnTermClause + this.dataDesc.formatClause = formatClause + this.dataDesc.columns = columns + this.dataDesc.columnsFromPathClause =
columnsFromPathClause + this.dataDesc.precedingFilterClause = precedingFilterClause + this.dataDesc.columnMappingClause = columnMappingClause + this.dataDesc.whereExpr = whereExpr + this.dataDesc.orderByClause = orderByClause + + this.isExceptFailed = isExceptFailed + + properties = new HashMap<>() + } + + LoadAttributes addProperties(String k, String v) { + properties.put(k, v) + return this + } + + String getPropertiesStr() { + if (properties.isEmpty()) { + return "" + } + String prop = "PROPERTIES (" + properties.forEach (k, v) -> { + prop += "\"${k}\" = \"${v}\"," + } + prop = prop.substring(0, prop.size() - 1) + prop += ")" + return prop + } + + LoadAttributes withPathStyle() { + usePathStyle = "true" + return this + } + + public DataDesc dataDesc + public Map properties + public String label + public String usePathStyle = "false" + public boolean isExceptFailed +} \ No newline at end of file diff --git a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_timeout.groovy b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_timeout.groovy index 23d92f31e5ad8e..7baf18c772290f 100644 --- a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_timeout.groovy +++ b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_timeout.groovy @@ -50,7 +50,7 @@ suite("test_cloud_mow_insert_timeout", "nonConcurrent") { def customFeConfig = [ delete_bitmap_lock_expiration_seconds : 5, calculate_delete_bitmap_task_timeout_seconds : 2, - mow_insert_into_commit_retry_times : 2 + mow_calculate_delete_bitmap_retry_times : 2 ] setFeConfigTemporary(customFeConfig) { diff --git a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_with_retry.groovy b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_with_retry.groovy new file mode 100644 index 00000000000000..99eb18a4917066 --- /dev/null +++ b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_insert_with_retry.groovy @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_cloud_mow_insert_with_retry", "nonConcurrent") { + if (!isCloudMode()) { + return + } + + GetDebugPoint().clearDebugPointsForAllFEs() + GetDebugPoint().clearDebugPointsForAllBEs() + + def customFeConfig = [ + calculate_delete_bitmap_task_timeout_seconds: 2 + ] + def dbName = "regression_test_fault_injection_p0_cloud" + def table1 = dbName + ".test_cloud_mow_insert_with_retry" + setFeConfigTemporary(customFeConfig) { + try { + GetDebugPoint().enableDebugPointForAllBEs("CloudEngineCalcDeleteBitmapTask.execute.enable_wait") + sql "DROP TABLE IF EXISTS ${table1} FORCE;" + sql """ CREATE TABLE IF NOT EXISTS ${table1} ( + `k1` int NOT NULL, + `c1` int, + `c2` int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "replication_num" = "1"); """ + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl + "&useLocalSessionState=true") { + def timeout = 2000 + def now = System.currentTimeMillis() + sql "insert into ${table1} values(1,1,1);" + def time_diff = System.currentTimeMillis() - now + logger.info("time_diff:" + time_diff) + assertTrue(time_diff > timeout, "insert or delete should take over " + timeout + " ms") + + now = System.currentTimeMillis() + sql "insert into ${table1} values(2,2,2);" + time_diff = System.currentTimeMillis() - now + logger.info("time_diff:" + time_diff) + assertTrue(time_diff > timeout, "insert or delete should take over " + timeout + " ms") + order_qt_sql "select * from ${table1};" + + now = System.currentTimeMillis() + sql "delete from ${table1} where k1=2;" + time_diff = System.currentTimeMillis() - now + logger.info("time_diff:" + time_diff) + assertTrue(time_diff > timeout, "insert or delete should take over " + timeout + " ms") + order_qt_sql "select * from ${table1};" + } + } catch (Exception e) { + logger.info(e.getMessage()) + throw e + } finally { + GetDebugPoint().disableDebugPointForAllFEs("CloudEngineCalcDeleteBitmapTask.execute.enable_wait") + sql "DROP TABLE IF EXISTS ${table1};" + GetDebugPoint().clearDebugPointsForAllBEs() + } + } +} \ No newline at end of file diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_delete_publish_skip_read.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_delete_publish_skip_read.groovy index 281ff9a778782c..6fce2245a9a205 100644 --- a/regression-test/suites/fault_injection_p0/partial_update/test_delete_publish_skip_read.groovy +++ b/regression-test/suites/fault_injection_p0/partial_update/test_delete_publish_skip_read.groovy @@ -86,7 +86,7 @@ suite("test_delete_publish_skip_read", "nonConcurrent") { sql "insert into ${table1}(k1,c1,c2) values(1,999,999),(2,888,888),(3,777,777);" } - Thread.sleep(500) + Thread.sleep(1000) def t2 = Thread.start { sql "insert into ${table1}(k1,__DORIS_DELETE_SIGN__) values(2,1);" diff --git a/regression-test/suites/fault_injection_p0/test_disable_move_memtable.groovy b/regression-test/suites/fault_injection_p0/test_disable_move_memtable.groovy index 825be4e58dea0b..83b04fb514972e 100644 --- a/regression-test/suites/fault_injection_p0/test_disable_move_memtable.groovy +++ b/regression-test/suites/fault_injection_p0/test_disable_move_memtable.groovy @@ -261,7 +261,6 @@ suite("test_disable_move_memtable", "nonConcurrent") { } } - sql """ set enable_nereids_planner=true """ insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination 
tuple descriptor") insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success") sql """ set enable_insert_strict = false """ @@ -269,19 +268,10 @@ suite("test_disable_move_memtable", "nonConcurrent") { insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination tuple descriptor") insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success") sql """ set enable_insert_strict = true """ - sql """ set group_commit = off_mode """ - sql """ set enable_nereids_planner=false """ - insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination tuple descriptor") - insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success") sql """ set group_commit = sync_mode """ insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination tuple descriptor") insert_into_value_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success") sql """ set group_commit = off_mode """ - - sql """ set enable_nereids_planner=true """ - insert_into_select_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination tuple descriptor") - insert_into_select_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success") - sql """ set enable_nereids_planner=false """ insert_into_select_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination tuple descriptor") insert_into_select_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success") @@ -299,19 +289,11 @@ suite("test_disable_move_memtable", "nonConcurrent") { return } - sql """ set enable_nereids_planner=true """ - stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall", "fail") - stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall1", "success") - sql """ set enable_nereids_planner=false """ stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall", "fail") stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall1", "success") - sql """ set enable_nereids_planner=true """ broker_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall", "CANCELLED") broker_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall1", "FINISHED") - sql """ set enable_nereids_planner=false """ - broker_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "brokerload", "CANCELLED") - broker_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "brokerload1", "FINISHED") sql """ set enable_memtable_on_sink_node=false """ sql """ DROP TABLE IF EXISTS `baseall` """ diff --git a/regression-test/suites/insert_overwrite_p1/test_iot_auto_detect_concurrent.groovy b/regression-test/suites/insert_overwrite_p1/test_iot_auto_detect_concurrent.groovy index e796edfe5bb1d2..0ce026fb99b184 100644 --- a/regression-test/suites/insert_overwrite_p1/test_iot_auto_detect_concurrent.groovy +++ b/regression-test/suites/insert_overwrite_p1/test_iot_auto_detect_concurrent.groovy @@ -26,6 +26,7 @@ suite("test_iot_auto_detect_concurrent") { sql new File("""${context.file.parent}/ddl/test_iot_auto_detect_concurrent.sql""").text def success_status = true + def err_msg = "" def load_data = { range, offset, expect_success -> try { sql " use 
test_iot_auto_detect_concurrent; " @@ -37,6 +38,7 @@ suite("test_iot_auto_detect_concurrent") { success_status = false log.info("fails one") } + err_msg = e.getMessage() log.info("successfully catch the failed insert") return } @@ -98,10 +100,14 @@ suite("test_iot_auto_detect_concurrent") { thread6.join() thread7.join() // suppose result: Success to overwrite with a multiple of ten values - assertTrue(success_status) - qt_sql3 " select count(k0) from test_concurrent_write; " - qt_sql4 " select count(distinct k0) from test_concurrent_write; " - + if (!success_status) { + // Not allowed running Insert Overwrite on same table + assertTrue(err_msg.contains('same table')) + } else { + // The execution was fast, resulting in no concurrent execution + qt_sql3 " select count(k0) from test_concurrent_write; " + qt_sql4 " select count(distinct k0) from test_concurrent_write; " + } /// with drop partition concurrently success_status = true diff --git a/regression-test/suites/insert_p0/group_commit/test_group_commit_error.groovy b/regression-test/suites/insert_p0/group_commit/test_group_commit_error.groovy index 4589b38cafce76..7f785a3292f857 100644 --- a/regression-test/suites/insert_p0/group_commit/test_group_commit_error.groovy +++ b/regression-test/suites/insert_p0/group_commit/test_group_commit_error.groovy @@ -45,7 +45,6 @@ suite("test_group_commit_error", "nonConcurrent") { try { GetDebugPoint().enableDebugPointForAllBEs("FragmentMgr.exec_plan_fragment.failed") sql """ set group_commit = async_mode """ - sql """ set enable_nereids_planner = false """ sql """ insert into ${tableName} values (2, 2) """ } catch (Exception e) { logger.info("failed: " + e.getMessage()) @@ -56,7 +55,6 @@ suite("test_group_commit_error", "nonConcurrent") { try { GetDebugPoint().enableDebugPointForAllBEs("FragmentMgr._get_query_ctx.failed") sql """ set group_commit = async_mode """ - sql """ set enable_nereids_planner = false """ sql """ insert into ${tableName} values (3, 3) """ assertTrue(false) } catch (Exception e) { @@ -68,7 +66,6 @@ suite("test_group_commit_error", "nonConcurrent") { try { GetDebugPoint().enableDebugPointForAllBEs("LoadBlockQueue.add_block.failed") sql """ set group_commit = async_mode """ - sql """ set enable_nereids_planner = false """ sql """ insert into ${tableName} values (4, 4) """ assertTrue(false) } catch (Exception e) { diff --git a/regression-test/suites/insert_p0/insert_group_commit_into.groovy b/regression-test/suites/insert_p0/insert_group_commit_into.groovy index 7af61dfc25fa6d..67907b5a69b923 100644 --- a/regression-test/suites/insert_p0/insert_group_commit_into.groovy +++ b/regression-test/suites/insert_p0/insert_group_commit_into.groovy @@ -98,12 +98,11 @@ suite("insert_group_commit_into") { assertTrue(!serverInfo.contains("'label':'group_commit_")) } - for (item in ["legacy", "nereids"]) { - try { - // create table - sql """ drop table if exists ${table}; """ + try { + // create table + sql """ drop table if exists ${table}; """ - sql """ + sql """ CREATE TABLE ${table} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -121,200 +120,184 @@ suite("insert_group_commit_into") { ); """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl + "&useLocalSessionState=true") { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - // 1. 
insert into - group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${table}(id) values(4); """, 1 - group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50); """, 2 - group_commit_insert """ insert into ${table}(id, name) values(2, 'b'); """, 1 - if (item == "nereids") { - none_group_commit_insert """ insert into ${table}(id) select 6; """, 1 - } else { - group_commit_insert """ insert into ${table}(id) select 6; """, 1 - } - - getRowCount(6) - order_qt_select1 """ select * from ${table} order by id, name, score asc; """ - - // 2. insert into and delete - sql """ delete from ${table} where id = 4; """ - group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - /*sql """ insert into ${table}(id, name) values(4, 'd1'); """ - sql """ insert into ${table}(id, name) values(4, 'd1'); """ - sql """ delete from ${table} where id = 4; """*/ - group_commit_insert """ insert into ${table}(id, name) values(4, 'e1'); """, 1 - group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50); """, 2 - group_commit_insert """ insert into ${table}(id, name) values(2, 'b'); """, 1 - group_commit_insert """ insert into ${table}(id) values(6); """, 1 - - getRowCount(11) - order_qt_select2 """ select * from ${table} order by id, name, score asc; """ - - // 3. insert into and light schema change: add column - group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${table}(id) values(4); """, 1 - group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50); """, 2 - sql """ alter table ${table} ADD column age int after name; """ - group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 - group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 - - assertTrue(getAlterTableState(), "add column should success") - getRowCount(17) - order_qt_select3 """ select * from ${table} order by id, name,score asc; """ - - // 4. insert into and truncate table - /*sql """ insert into ${table}(name, id) values('c', 3); """ - sql """ insert into ${table}(id) values(4); """ - sql """ insert into ${table} values (1, 'a', 5, 10),(5, 'q', 6, 50); """*/ - sql """ truncate table ${table}; """ - group_commit_insert """ insert into ${table}(id, name) values(2, 'b'); """, 1 - group_commit_insert """ insert into ${table}(id) values(6); """, 1 - - getRowCount(2) - order_qt_select4 """ select * from ${table} order by id, name, score asc; """ - - // 5. insert into and schema change: modify column order - group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${table}(id) values(4); """, 1 - group_commit_insert """ insert into ${table}(id, name, age, score) values (1, 'a', 5, 10),(5, 'q', 6, 50); """, 2 - sql """ alter table ${table} order by (id, name, score, age); """ - group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 - group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 - - assertTrue(getAlterTableState(), "modify column order should success") - getRowCount(8) - order_qt_select5 """ select id, name, score, age from ${table} order by id, name, score asc; """ - - // 6. 
insert into and light schema change: drop column - group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${table}(id) values(4); """, 1 - group_commit_insert """ insert into ${table}(id, name, age, score) values (1, 'a', 5, 10),(5, 'q', 6, 50); """, 2 - sql """ alter table ${table} DROP column age; """ - group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 - group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 - - assertTrue(getAlterTableState(), "drop column should success") - getRowCount(14) - order_qt_select6 """ select * from ${table} order by id, name, score asc; """ - - // 7. insert into and add rollup - group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${table}(id) values(4); """, 1 - sql "set enable_insert_strict=false" - group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50),(101, 'a', 100); """, 2 - sql "set enable_insert_strict=true" - try { - sql """ insert into ${table} values (102, 'a', 100); """ - assertTrue(false, "insert should fail") - } catch (Exception e) { - logger.info("error: " + e.getMessage()) - assertTrue(e.getMessage().contains("url:")) - } - sql """ alter table ${table} ADD ROLLUP r1(name, score); """ - group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 - group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 - - getRowCount(20) - order_qt_select7 """ select name, score from ${table} order by name asc; """ - assertTrue(getAlterTableState(), "add rollup should success") - - if (item == "nereids") { - group_commit_insert """ insert into ${table}(id, name, score) values(10 + 1, 'h', 100); """, 1 - none_group_commit_insert """ insert into ${table}(id, name, score) select 10 + 2, 'h', 100; """, 1 - group_commit_insert """ insert into ${table} with label test_gc_""" + System.currentTimeMillis() + """ (id, name, score) values(13, 'h', 100); """, 1 - getRowCount(23) - } else { - none_group_commit_insert """ insert into ${table}(id, name, score) values(10 + 1, 'h', 100); """, 1 - none_group_commit_insert """ insert into ${table}(id, name, score) select 10 + 2, 'h', 100; """, 1 - none_group_commit_insert """ insert into ${table} with label test_gc_""" + System.currentTimeMillis() + """ (id, name, score) values(13, 'h', 100); """, 1 - } - - def rowCount = sql "select count(*) from ${table}" - logger.info("row count: " + rowCount) - assertEquals(23, rowCount[0][0]) - - // txn insert - sql """ set enable_nereids_dml = true; """ - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - def stmt = prepareStatement """ begin """ - stmt.executeUpdate() - txn_insert """ insert into ${table}(id, name, score) values(20, 'i', 101); """, 1 - txn_insert """ insert into ${table}(id, name, score) values(21, 'j', 102); """, 1 - stmt = prepareStatement """ commit """ - stmt.executeUpdate() - - rowCount = sql "select count(*) from ${table}" - logger.info("row count: " + rowCount) - assertEquals(rowCount[0][0], 25) + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl + "&useLocalSessionState=true") { + sql """ set group_commit = async_mode; """ + + // 1. 
insert into + group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${table}(id) values(4); """, 1 + group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50); """, 2 + group_commit_insert """ insert into ${table}(id, name) values(2, 'b'); """, 1 + none_group_commit_insert """ insert into ${table}(id) select 6; """, 1 + + getRowCount(6) + order_qt_select1 """ select * from ${table} order by id, name, score asc; """ + + // 2. insert into and delete + sql """ delete from ${table} where id = 4; """ + group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + /*sql """ insert into ${table}(id, name) values(4, 'd1'); """ + sql """ insert into ${table}(id, name) values(4, 'd1'); """ + sql """ delete from ${table} where id = 4; """*/ + group_commit_insert """ insert into ${table}(id, name) values(4, 'e1'); """, 1 + group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50); """, 2 + group_commit_insert """ insert into ${table}(id, name) values(2, 'b'); """, 1 + group_commit_insert """ insert into ${table}(id) values(6); """, 1 + + getRowCount(11) + order_qt_select2 """ select * from ${table} order by id, name, score asc; """ + + // 3. insert into and light schema change: add column + group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${table}(id) values(4); """, 1 + group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50); """, 2 + sql """ alter table ${table} ADD column age int after name; """ + group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 + group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 + + assertTrue(getAlterTableState(), "add column should success") + getRowCount(17) + order_qt_select3 """ select * from ${table} order by id, name,score asc; """ + + // 4. insert into and truncate table + /*sql """ insert into ${table}(name, id) values('c', 3); """ + sql """ insert into ${table}(id) values(4); """ + sql """ insert into ${table} values (1, 'a', 5, 10),(5, 'q', 6, 50); """*/ + sql """ truncate table ${table}; """ + group_commit_insert """ insert into ${table}(id, name) values(2, 'b'); """, 1 + group_commit_insert """ insert into ${table}(id) values(6); """, 1 + + getRowCount(2) + order_qt_select4 """ select * from ${table} order by id, name, score asc; """ + + // 5. insert into and schema change: modify column order + group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${table}(id) values(4); """, 1 + group_commit_insert """ insert into ${table}(id, name, age, score) values (1, 'a', 5, 10),(5, 'q', 6, 50); """, 2 + sql """ alter table ${table} order by (id, name, score, age); """ + group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 + group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 + + assertTrue(getAlterTableState(), "modify column order should success") + getRowCount(8) + order_qt_select5 """ select id, name, score, age from ${table} order by id, name, score asc; """ + + // 6. 
insert into and light schema change: drop column + group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${table}(id) values(4); """, 1 + group_commit_insert """ insert into ${table}(id, name, age, score) values (1, 'a', 5, 10),(5, 'q', 6, 50); """, 2 + sql """ alter table ${table} DROP column age; """ + group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 + group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 + + assertTrue(getAlterTableState(), "drop column should success") + getRowCount(14) + order_qt_select6 """ select * from ${table} order by id, name, score asc; """ + + // 7. insert into and add rollup + group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${table}(id) values(4); """, 1 + sql "set enable_insert_strict=false" + group_commit_insert """ insert into ${table} values (1, 'a', 10),(5, 'q', 50),(101, 'a', 100); """, 2 + sql "set enable_insert_strict=true" + try { + sql """ insert into ${table} values (102, 'a', 100); """ + assertTrue(false, "insert should fail") + } catch (Exception e) { + logger.info("error: " + e.getMessage()) + assertTrue(e.getMessage().contains("url:")) } - } finally { - // try_sql("DROP TABLE ${table}") + sql """ alter table ${table} ADD ROLLUP r1(name, score); """ + group_commit_insert_with_retry """ insert into ${table}(id, name) values(2, 'b'); """, 1 + group_commit_insert_with_retry """ insert into ${table}(id) values(6); """, 1 + + getRowCount(20) + order_qt_select7 """ select name, score from ${table} order by name asc; """ + assertTrue(getAlterTableState(), "add rollup should success") + + group_commit_insert """ insert into ${table}(id, name, score) values(10 + 1, 'h', 100); """, 1 + none_group_commit_insert """ insert into ${table}(id, name, score) select 10 + 2, 'h', 100; """, 1 + group_commit_insert """ insert into ${table} with label test_gc_""" + System.currentTimeMillis() + """ (id, name, score) values(13, 'h', 100); """, 1 + getRowCount(23) + + def rowCount = sql "select count(*) from ${table}" + logger.info("row count: " + rowCount) + assertEquals(23, rowCount[0][0]) + + // txn insert + sql """ set enable_nereids_dml = true; """ + sql """ set enable_nereids_planner=true; """ + sql """ set enable_fallback_to_original_planner=false; """ + def stmt = prepareStatement """ begin """ + stmt.executeUpdate() + txn_insert """ insert into ${table}(id, name, score) values(20, 'i', 101); """, 1 + txn_insert """ insert into ${table}(id, name, score) values(21, 'j', 102); """, 1 + stmt = prepareStatement """ commit """ + stmt.executeUpdate() + + rowCount = sql "select count(*) from ${table}" + logger.info("row count: " + rowCount) + assertEquals(rowCount[0][0], 25) } + } finally { + // try_sql("DROP TABLE ${table}") + } - // test connect to observer fe - try { - def fes = sql_return_maparray "show frontends" - logger.info("frontends: ${fes}") - if (fes.size() > 1) { - def observer_fe = null - for (def fe : fes) { - if (fe.IsMaster == "false") { - observer_fe = fe - break - } + // test connect to observer fe + try { + def fes = sql_return_maparray "show frontends" + logger.info("frontends: ${fes}") + if (fes.size() > 1) { + def observer_fe = null + for (def fe : fes) { + if (fe.IsMaster == "false") { + observer_fe = fe + break } - if (observer_fe != null) { - def url = "jdbc:mysql://${observer_fe.Host}:${observer_fe.QueryPort}/" - logger.info("observer url: " + url) - 
connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = url) { - sql """ set group_commit = async_mode; """ - - // 1. insert into - def server_info = group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 - /*assertTrue(server_info.contains('query_id')) - // get query_id, such as 43f87963586a482a-b0496bcf9e2b5555 - def query_id_index = server_info.indexOf("'query_id':'") + "'query_id':'".length() - def query_id = server_info.substring(query_id_index, query_id_index + 33) - logger.info("query_id: " + query_id) - // 2. check profile - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET -u ${context.config.jdbcUser}:${context.config.jdbcPassword} http://${observer_fe.Host}:${observer_fe.HttpPort}") - sb.append("/api/profile?query_id=").append(query_id) - String command = sb.toString() - logger.info(command) - def process = command.execute() - def code = process.waitFor() - def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - def out = process.getText() - logger.info("Get profile: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def json = parseJson(out) - assertEquals("success", json.msg.toLowerCase())*/ - } + } + if (observer_fe != null) { + def url = "jdbc:mysql://${observer_fe.Host}:${observer_fe.QueryPort}/" + logger.info("observer url: " + url) + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = url) { + sql """ set group_commit = async_mode; """ + + // 1. insert into + def server_info = group_commit_insert """ insert into ${table}(name, id) values('c', 3); """, 1 + /*assertTrue(server_info.contains('query_id')) + // get query_id, such as 43f87963586a482a-b0496bcf9e2b5555 + def query_id_index = server_info.indexOf("'query_id':'") + "'query_id':'".length() + def query_id = server_info.substring(query_id_index, query_id_index + 33) + logger.info("query_id: " + query_id) + // 2. check profile + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET -u ${context.config.jdbcUser}:${context.config.jdbcPassword} http://${observer_fe.Host}:${observer_fe.HttpPort}") + sb.append("/api/profile?query_id=").append(query_id) + String command = sb.toString() + logger.info(command) + def process = command.execute() + def code = process.waitFor() + def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + def out = process.getText() + logger.info("Get profile: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def json = parseJson(out) + assertEquals("success", json.msg.toLowerCase())*/ } - } else { - logger.info("only one fe, skip test connect to observer fe") } - } finally { + } else { + logger.info("only one fe, skip test connect to observer fe") } + } finally { + } - // table with array type - tableName = "insert_group_commit_into_duplicate_array" - table = dbName + "." + tableName - try { - // create table - sql """ drop table if exists ${table}; """ + // table with array type + tableName = "insert_group_commit_into_duplicate_array" + table = dbName + "." 
+ tableName + try { + // create table + sql """ drop table if exists ${table}; """ - sql """ + sql """ CREATE table ${table} ( teamID varchar(255), service_id varchar(255), @@ -337,46 +320,40 @@ suite("insert_group_commit_into") { PROPERTIES ("replication_allocation" = "tag.location.default: 1", "group_commit_interval_ms" = "200") """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ - // 1. insert into - group_commit_insert """ + // 1. insert into + group_commit_insert """ INSERT INTO ${table} (`data_binary`, `end_time`, `endpoint_id`, `endpoint_name`, `is_error`, `latency`, `segment_id`, `service_id`, `service_instance_id`, `start_time`, `statement`, `tags`, `teamID`, `time_bucket`, `trace_id`) VALUES ('CgEwEiQzMjI5YjdjZC1mM2EyLTQzNTktYWEyNC05NDYzODhjOWNjNTQaggQY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAEY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAIY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAMY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFs
dWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAQY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAUY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAYY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAcY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAgY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmF
sdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAkY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAoY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAsY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAwY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA0Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEg
t0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA4Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA8Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBAY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBEY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBIY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS
2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBMY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5IixzZXJ2aWNlXzQ2ZGEwZGFiLWUyN2QtNDgyMC1hZWEyLTliZmMxNTc0MTYxNSo0c2VydmljZV9pbnN0YW5jZWFjODlhNGI3LTgxZjctNDNlOC04NWVkLWQyYjU3OGQ5ODA1MA==', 1697032066304, '36b2d9ff-4c25-49f3-a726-eea812564411', '355f96cd-b1b1-4688-a5f6-a8e3f3a55c9a', false, 3, '3229b7cd-f3a2-4359-aa24-946388c9cc54', 'service_46da0dab-e27d-4820-aea2-9bfc15741615', 'service_instanceac89a4b7-81f7-43e8-85ed-d2b578d98050', 1697032066304, 'statement: b9903670-3821-4f4c-a587-bbcf02c04b77', ['[tagKey_5=tagValue_5, tagKey_3=tagValue_3, tagKey_1=tagValue_1, tagKey_16=tagValue_16, tagKey_8=tagValue_8, tagKey_15=tagValue_15, tagKey_6=tagValue_6, tagKey_11=tagValue_11, tagKey_10=tagValue_10, tagKey_4=tagValue_4, tagKey_13=tagValue_13, tagKey_14=tagValue_14, tagKey_2=tagValue_2, tagKey_17=tagValue_17, tagKey_19=tagValue_19, tagKey_0=tagValue_0, tagKey_18=tagValue_18, tagKey_9=tagValue_9, tagKey_7=tagValue_7, tagKey_12=tagValue_12]'], '0', 0, '0'); """, 1 - getRowCount(1) - qt_sql """ select * from ${table}; """ + getRowCount(1) + qt_sql """ select * from ${table}; """ - sql " set enable_unique_key_partial_update=true " - none_group_commit_insert """ + sql " set enable_unique_key_partial_update=true " + none_group_commit_insert """ INSERT INTO ${table} (`data_binary`, `end_time`, `endpoint_id`, `endpoint_name`, `is_error`, `latency`, `segment_id`, `service_id`, `service_instance_id`, `start_time`, `statement`, `tags`, `teamID`, `time_bucket`, `trace_id`) VALUES 
('CgEwEiQzMjI5YjdjZC1mM2EyLTQzNTktYWEyNC05NDYzODhjOWNjNTQaggQY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAEY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAIY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAMY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAQY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAUY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfM
BIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAYY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAcY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAgY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAkY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAoY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlf
MxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAsY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECAwY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA0Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA4Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECA8Y/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXl
fNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBAY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBEY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBIY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5GoQECBMY/6n597ExIP+p+fexMWIWCgh0YWdLZXlfMBIKdGFnVmFsdWVfMGIWCgh0YWdLZXlfMRIKdGFnVmFsdWVfMWIWCgh0YWdLZXlfMhIKdGFnVmFsdWVfMmIWCgh0YWdLZXlfMxIKdGFnVmFsdWVfM2IWCgh0YWdLZXlfNBIKdGFnVmFsdWVfNGIWCgh0YWdLZXlfNRIKdGFnVmFsdWVfNWIWCgh0YWdLZXlfNhIKdGFnVmFsdWVfNmIWCgh0YWdLZXlfNxIKdGFnVmFsdWVfN2IWCgh0YWdLZXlfOBIKdGFnVmFsdWVfOGIWCgh0YWdLZXlfORIKdGFnVmFsdWVfOWIYCgl0YWdLZXlfMTASC3RhZ1ZhbHVlXzEwYhgKCXRhZ0tleV8xMRILdGFnVmFsdWVfMTFiGAoJdGFnS2V5XzEyEgt0YWdWYWx1ZV8xMmIYCgl0YWdLZXlfMTMSC3RhZ1ZhbHVlXzEzYhgKCXRhZ0tleV8xNBILdGFnVmFsdWVfMTRiGAoJdGFnS2V5XzE1Egt0YWdWYWx1ZV8xNWIYCgl0YWdLZXlfMTYSC3RhZ1ZhbHVlXzE2YhgKCXRhZ0tleV8xNxILdGFnVmFsdWVfMTdiGAoJdGFnS2V5XzE4Egt0YWdWYWx1ZV8xOGIYCgl0YWdLZXlfMTkSC3RhZ1ZhbHVlXzE5IixzZXJ2aWNlXzQ2ZGEwZGFiLWUyN2QtNDgyMC1hZWEyLTliZmMxNTc0MTYxNSo0c2VydmljZV9pbnN0YW5jZWFjODlhNGI3LTgxZjctNDNlOC04NWVkLWQyYjU3OGQ5ODA1MA==', 1697032066304, '36b2d9ff-4c25-49f3-a726-eea812564411', '355f96cd-b1b1-4688-a5f6-a8e3f3a55c9a', false, 3, '3229b7cd-f3a2-4359-aa24-946388c9cc54', 
'service_46da0dab-e27d-4820-aea2-9bfc15741615', 'service_instanceac89a4b7-81f7-43e8-85ed-d2b578d98050', 1697032066304, 'statement: b9903670-3821-4f4c-a587-bbcf02c04b77', ['[tagKey_5=tagValue_5, tagKey_3=tagValue_3, tagKey_1=tagValue_1, tagKey_16=tagValue_16, tagKey_8=tagValue_8, tagKey_15=tagValue_15, tagKey_6=tagValue_6, tagKey_11=tagValue_11, tagKey_10=tagValue_10, tagKey_4=tagValue_4, tagKey_13=tagValue_13, tagKey_14=tagValue_14, tagKey_2=tagValue_2, tagKey_17=tagValue_17, tagKey_19=tagValue_19, tagKey_0=tagValue_0, tagKey_18=tagValue_18, tagKey_9=tagValue_9, tagKey_7=tagValue_7, tagKey_12=tagValue_12]'], '0', 0, '0'); """, 1 - } - } finally { - // try_sql("DROP TABLE ${table}") } + } finally { + // try_sql("DROP TABLE ${table}") + } - // table with MaterializedView - tableName = "insert_group_commit_into_mv" - table = dbName + "." + tableName - def table_tmp = dbName + ".test_table_tmp" - try { - // create table - sql """ drop table if exists ${table}; """ - sql """CREATE table ${table} ( + // table with MaterializedView + tableName = "insert_group_commit_into_mv" + table = dbName + "." + tableName + def table_tmp = dbName + ".test_table_tmp" + try { + // create table + sql """ drop table if exists ${table}; """ + sql """CREATE table ${table} ( `ordernum` varchar(65533) NOT NULL , `dnt` datetime NOT NULL , `data` json NULL @@ -388,8 +365,8 @@ suite("insert_group_commit_into") { "replication_allocation" = "tag.location.default: 1", "group_commit_interval_ms" = "200" );""" - sql """drop table if exists ${table_tmp};""" - sql """CREATE TABLE ${table_tmp} ( + sql """drop table if exists ${table_tmp};""" + sql """CREATE TABLE ${table_tmp} ( `dnt` varchar(200) NULL, `ordernum` varchar(200) NULL, `type` varchar(20) NULL, @@ -408,42 +385,36 @@ suite("insert_group_commit_into") { "replication_allocation" = "tag.location.default: 1", "group_commit_interval_ms" = "200" ); """ - sql """DROP MATERIALIZED VIEW IF EXISTS ods_zn_dnt_max1 ON ${table};""" - createMV("""create materialized view ods_zn_dnt_max1 as + sql """DROP MATERIALIZED VIEW IF EXISTS ods_zn_dnt_max1 ON ${table};""" + createMV("""create materialized view ods_zn_dnt_max1 as select ordernum,max(dnt) as dnt from ${table} group by ordernum ORDER BY ordernum;""") - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ - // 1. insert into - int count = 0; - while (count < 30) { - try { - group_commit_insert """ + // 1. 
insert into + int count = 0; + while (count < 30) { + try { + group_commit_insert """ insert into ${table} values('cib2205045_1_1s','2023/6/10 3:55:33','{"DB1":168939,"DNT":"2023-06-10 03:55:33"}');""", 1 - break - } catch (Exception e) { - logger.info("got exception:" + e) - if (e.getMessage().contains("is blocked on schema change")) { - Thread.sleep(1000) - } - count++ + break + } catch (Exception e) { + logger.info("got exception:" + e) + if (e.getMessage().contains("is blocked on schema change")) { + Thread.sleep(1000) } + count++ } - group_commit_insert """insert into ${table} values('cib2205045_1_1s','2023/6/10 3:56:33','{"DB1":168939,"DNT":"2023-06-10 03:56:33"}');""", 1 - group_commit_insert """insert into ${table} values('cib2205045_1_1s','2023/6/10 3:57:33','{"DB1":168939,"DNT":"2023-06-10 03:57:33"}');""", 1 - group_commit_insert """insert into ${table} values('cib2205045_1_1s','2023/6/10 3:58:33','{"DB1":168939,"DNT":"2023-06-10 03:58:33"}');""", 1 + } + group_commit_insert """insert into ${table} values('cib2205045_1_1s','2023/6/10 3:56:33','{"DB1":168939,"DNT":"2023-06-10 03:56:33"}');""", 1 + group_commit_insert """insert into ${table} values('cib2205045_1_1s','2023/6/10 3:57:33','{"DB1":168939,"DNT":"2023-06-10 03:57:33"}');""", 1 + group_commit_insert """insert into ${table} values('cib2205045_1_1s','2023/6/10 3:58:33','{"DB1":168939,"DNT":"2023-06-10 03:58:33"}');""", 1 - getRowCount(4) + getRowCount(4) - qt_order """select + qt_order """select '2023-06-10', tmp.ordernum, cast(nvl(if(tmp.p0-tmp1.p0>0,tmp.p0-tmp1.p0,tmp.p0-tmp.p1),0) as decimal(10,4)), @@ -479,7 +450,7 @@ suite("insert_group_commit_into") { group by ordernum )tmp1 on tmp.ordernum=tmp1.ordernum;""" - qt_order2 """ + qt_order2 """ SELECT row_number() over(partition by add_date order by pc_num desc) ,row_number() over(partition by add_date order by vc_num desc) @@ -492,17 +463,17 @@ suite("insert_group_commit_into") { ,row_number() over(order by dnt) vt_num FROM ${table} ) t;""" - } - } finally { } + } finally { + } - // column name contains keyword - tableName = "insert_group_commit_into_with_keyword" - table = dbName + "." + tableName - try { - // create table - sql """ drop table if exists ${table}; """ - sql """ + // column name contains keyword + tableName = "insert_group_commit_into_with_keyword" + table = dbName + "." 
+ tableName + try { + // create table + sql """ drop table if exists ${table}; """ + sql """ CREATE TABLE IF NOT EXISTS ${table} ( k1 INT, @@ -516,20 +487,13 @@ suite("insert_group_commit_into") { ); """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner = true; """ - sql """ set enable_fallback_to_original_planner = false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - group_commit_insert """ insert into ${table} values(1, 'test'); """, 1 - group_commit_insert """ insert into ${table}(k1,`or`) values (2,"or"); """, 1 - getRowCount(2) - order_qt_select8 """ select * from ${table}; """ - } - } finally { + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ + group_commit_insert """ insert into ${table} values(1, 'test'); """, 1 + group_commit_insert """ insert into ${table}(k1,`or`) values (2,"or"); """, 1 + getRowCount(2) + order_qt_select8 """ select * from ${table}; """ } + } finally { } } diff --git a/regression-test/suites/insert_p0/insert_group_commit_into_max_filter_ratio.groovy b/regression-test/suites/insert_p0/insert_group_commit_into_max_filter_ratio.groovy index 64ae30f8f8a63f..53faa68816e422 100644 --- a/regression-test/suites/insert_p0/insert_group_commit_into_max_filter_ratio.groovy +++ b/regression-test/suites/insert_p0/insert_group_commit_into_max_filter_ratio.groovy @@ -168,69 +168,49 @@ suite("insert_group_commit_into_max_filter_ratio") { // if enable strict mode // 100 rows(success, fail), 10000 rows(success, fail), 15000 rows(success, fail) // async mode, sync mode, off mode - for (item in ["legacy", "nereids"]) { - sql """ truncate table ${tableName} """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } + sql """ truncate table ${tableName} """ + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = sync_mode; """ - group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10); """, 1 - sql """ set group_commit = async_mode; """ - group_commit_insert """ insert into ${dbTableName}(id) values(2); """, 1 - sql """ set group_commit = off_mode; """ - off_mode_group_commit_insert """ insert into ${dbTableName} values (3, 'a', 10); """, 1 + sql """ set group_commit = sync_mode; """ + group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10); """, 1 + sql """ set group_commit = async_mode; """ + group_commit_insert """ insert into ${dbTableName}(id) values(2); """, 1 + sql """ set group_commit = off_mode; """ + off_mode_group_commit_insert """ insert into ${dbTableName} values (3, 'a', 10); """, 1 - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - group_commit_insert """ insert into ${dbTableName} values (4, 'abc', 10); """, 0 - } else { - fail_group_commit_insert """ insert into ${dbTableName} values (4, 'abc', 10); """, 0 - } - sql """ set enable_insert_strict = false; """ - group_commit_insert """ insert into ${dbTableName} values (5, 'abc', 10); """, 0 - - // The row 6 and 7 is different 
between legacy and nereids - try { - sql """ set group_commit = off_mode; """ - sql """ set enable_insert_strict = true; """ - sql """ insert into ${dbTableName} values (6, 'a', 'a'); """ - } catch (Exception e) { - logger.info("exception: " + e) - assertTrue(e.toString().contains("Invalid number format")) - } + sql """ set group_commit = async_mode; """ + group_commit_insert """ insert into ${dbTableName} values (4, 'abc', 10); """, 0 + sql """ set enable_insert_strict = false; """ + group_commit_insert """ insert into ${dbTableName} values (5, 'abc', 10); """, 0 - try { - sql """ set group_commit = off_mode; """ - sql """ set enable_insert_strict = false; """ - sql """ insert into ${dbTableName} values (7, 'a', 'a'); """ - } catch (Exception e) { - logger.info("exception: " + e) - assertTrue(e.toString().contains("Invalid number format")) - } - - // TODO should throw exception? - sql """ set group_commit = async_mode; """ + // The row 6 and 7 is different between legacy and nereids + try { + sql """ set group_commit = off_mode; """ sql """ set enable_insert_strict = true; """ - if (item == "nereids") { - // will write [8, a, null] - // group_commit_insert """ insert into ${dbTableName} values (8, 'a', 'a'); """, 1 - } else { - fail_group_commit_insert """ insert into ${dbTableName} values (8, 'a', 'a'); """, 0 - } - sql """ set group_commit = async_mode; """ - sql """ set enable_insert_strict = false; """ - group_commit_insert """ insert into ${dbTableName} values (9, 'a', 'a'); """, 1 + sql """ insert into ${dbTableName} values (6, 'a', 'a'); """ + } catch (Exception e) { + logger.info("exception: " + e) + assertTrue(e.toString().contains("Invalid number format")) } - if (item == "nereids") { - get_row_count_with_retry(6) - } else { - get_row_count_with_retry(4) + + try { + sql """ set group_commit = off_mode; """ + sql """ set enable_insert_strict = false; """ + sql """ insert into ${dbTableName} values (7, 'a', 'a'); """ + } catch (Exception e) { + logger.info("exception: " + e) + assertTrue(e.toString().contains("Invalid number format")) } + + // TODO should throw exception? + sql """ set group_commit = async_mode; """ + sql """ set enable_insert_strict = true; """ + // will write [8, a, null] + // group_commit_insert """ insert into ${dbTableName} values (8, 'a', 'a'); """, 1 + sql """ set group_commit = async_mode; """ + sql """ set enable_insert_strict = false; """ + group_commit_insert """ insert into ${dbTableName} values (9, 'a', 'a'); """, 1 + get_row_count_with_retry(6) order_qt_sql """ select * from ${dbTableName} """ } sql """ truncate table ${tableName} """ diff --git a/regression-test/suites/insert_p0/insert_group_commit_into_unique.groovy b/regression-test/suites/insert_p0/insert_group_commit_into_unique.groovy index 8ae0d41565d488..74f2f345780200 100644 --- a/regression-test/suites/insert_p0/insert_group_commit_into_unique.groovy +++ b/regression-test/suites/insert_p0/insert_group_commit_into_unique.groovy @@ -69,15 +69,14 @@ suite("insert_group_commit_into_unique") { } } - for (item in ["legacy", "nereids"]) { - // 1. table without sequence column - try { - tableName = "insert_group_commit_into_unique" + "1_" + item - dbTableName = dbName + "." + tableName - // create table - sql """ drop table if exists ${dbTableName}; """ - - sql """ + // 1. table without sequence column + try { + tableName = "insert_group_commit_into_unique1" + dbTableName = dbName + "." 
+ tableName + // create table + sql """ drop table if exists ${dbTableName}; """ + + sql """ CREATE TABLE ${dbTableName} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -87,82 +86,75 @@ suite("insert_group_commit_into_unique") { DISTRIBUTED BY HASH(`id`) BUCKETS 1 PROPERTIES ( "replication_num" = "1", - "group_commit_interval_ms" = "100" + "group_commit_interval_ms" = "500" ); """ - // 1. insert into - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 - group_commit_insert """ insert into ${dbTableName}(id) values(6); """, 1 - group_commit_insert """ insert into ${dbTableName}(id) values(4); """, 1 - group_commit_insert """ insert into ${dbTableName}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name) values(2, 'b'); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 - - /*getRowCount(5) - qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ - } + // 1. insert into + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ + group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 + group_commit_insert """ insert into ${dbTableName}(id) values(6); """, 1 + group_commit_insert """ insert into ${dbTableName}(id) values(4); """, 1 + group_commit_insert """ insert into ${dbTableName}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name) values(2, 'b'); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 + + /*getRowCount(5) + qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ + } - // 2. stream load - streamLoad { - table "${tableName}" + // 2. 
stream load + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'group_commit', 'async_mode' - set 'columns', 'id, name, score' - file "test_group_commit_1.csv" - unset 'label' + set 'column_separator', ',' + set 'group_commit', 'async_mode' + set 'columns', 'id, name, score' + file "test_group_commit_1.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 4, 4, 0, 0) - } + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 4, 4, 0, 0) } - /*getRowCount(9) - qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ + } + /*getRowCount(9) + qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ - streamLoad { - table "${tableName}" + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'group_commit', 'async_mode' - set 'columns', 'id, name, score, __DORIS_DELETE_SIGN__' - file "test_group_commit_2.csv" - unset 'label' + set 'column_separator', ',' + set 'group_commit', 'async_mode' + set 'columns', 'id, name, score, __DORIS_DELETE_SIGN__' + file "test_group_commit_2.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 5, 5, 0, 0) - } + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 5, 5, 0, 0) } - getRowCount(12) - sql """ set show_hidden_columns = true """ - qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - sql """ set show_hidden_columns = false """ - qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - } finally { - // try_sql("DROP TABLE ${dbTableName}") } + getRowCount(12) + sql """ set show_hidden_columns = true """ + qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + sql """ set show_hidden_columns = false """ + qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + } finally { + // try_sql("DROP TABLE ${dbTableName}") + } - // 2. table with "function_column.sequence_col" - try { - tableName = "insert_group_commit_into_unique" + "2_" + item - dbTableName = dbName + "." + tableName - // create table - sql """ drop table if exists ${dbTableName}; """ + // 2. table with "function_column.sequence_col" + try { + tableName = "insert_group_commit_into_unique2" + dbTableName = dbName + "." + tableName + // create table + sql """ drop table if exists ${dbTableName}; """ - sql """ + sql """ CREATE TABLE ${dbTableName} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -173,83 +165,77 @@ suite("insert_group_commit_into_unique") { PROPERTIES ( "replication_num" = "1", "function_column.sequence_col" = "score", - "group_commit_interval_ms" = "100" + "group_commit_interval_ms" = "500" ); """ - // 1. 
insert into - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 - group_commit_insert """ insert into ${dbTableName}(id, score) values(6, 60); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, score) values(4, 70); """, 1 - group_commit_insert """ insert into ${dbTableName}(name, id, score) values('c', 3, 30); """, 1 - group_commit_insert """ insert into ${dbTableName}(score, id, name) values(30, 2, 'b'); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 - - /*getRowCount(5) - qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ - }; - - // 2. stream load - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'group_commit', 'async_mode' - set 'columns', 'id, name, score' - file "test_group_commit_1.csv" - unset 'label' - - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 4, 4, 0, 0) - } - } - /*getRowCount(9) + // 1. insert into + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ + + group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 + group_commit_insert """ insert into ${dbTableName}(id, score) values(6, 60); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, score) values(4, 70); """, 1 + group_commit_insert """ insert into ${dbTableName}(name, id, score) values('c', 3, 30); """, 1 + group_commit_insert """ insert into ${dbTableName}(score, id, name) values(30, 2, 'b'); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 + + /*getRowCount(5) qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ + }; - streamLoad { - table "${tableName}" + // 2. 
stream load + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'group_commit', 'async_mode' - set 'columns', 'id, name, score, __DORIS_DELETE_SIGN__' - file "test_group_commit_2.csv" - unset 'label' + set 'column_separator', ',' + set 'group_commit', 'async_mode' + set 'columns', 'id, name, score' + file "test_group_commit_1.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 5, 5, 0, 0) - } + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 4, 4, 0, 0) } - getRowCount(12) - sql """ set show_hidden_columns = true """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - sql """ set show_hidden_columns = false """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - } finally { - // try_sql("DROP TABLE ${dbTableName}") - sql """ set show_hidden_columns = false """ } + /*getRowCount(9) + qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ + + streamLoad { + table "${tableName}" - // 3. table with "function_column.sequence_type" - try { - tableName = "insert_group_commit_into_unique" + "3_" + item - dbTableName = dbName + "." + tableName - // create table - sql """ drop table if exists ${dbTableName}; """ + set 'column_separator', ',' + set 'group_commit', 'async_mode' + set 'columns', 'id, name, score, __DORIS_DELETE_SIGN__' + file "test_group_commit_2.csv" + unset 'label' - sql """ + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 5, 5, 0, 0) + } + } + getRowCount(12) + sql """ set show_hidden_columns = true """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + sql """ set show_hidden_columns = false """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + } finally { + // try_sql("DROP TABLE ${dbTableName}") + sql """ set show_hidden_columns = false """ + } + + // 3. table with "function_column.sequence_type" + try { + tableName = "insert_group_commit_into_unique3" + dbTableName = dbName + "." + tableName + // create table + sql """ drop table if exists ${dbTableName}; """ + + sql """ CREATE TABLE ${dbTableName} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -260,76 +246,69 @@ suite("insert_group_commit_into_unique") { PROPERTIES ( "replication_num" = "1", "function_column.sequence_type" = "int", - "group_commit_interval_ms" = "100" + "group_commit_interval_ms" = "500" ); """ - // 1. 
insert into - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_SEQUENCE_COL__) values (1, 'a', 10, 100),(5, 'q', 50, 500); """, 2 - group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 60, 600); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 50, 500); """, 1 - group_commit_insert """ insert into ${dbTableName}(name, id, score, __DORIS_SEQUENCE_COL__) values('c', 3, 30, 300); """, 1 - group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__) values(30, 2, 'b', 200); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__, __DORIS_SEQUENCE_COL__) values(1, 'a', 200, 1, 200) """, 1 - group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__) values(30, 2, 'b', 100, 1); """, 1 - - /*getRowCount(4) - qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ - }; - - // 2. stream load - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'group_commit', 'async_mode' - set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__' - set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' - file "test_group_commit_3.csv" - unset 'label' - - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 4, 4, 0, 0) - } - } - /*getRowCount(9) + // 1. insert into + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ + + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_SEQUENCE_COL__) values (1, 'a', 10, 100),(5, 'q', 50, 500); """, 2 + group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 60, 600); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 50, 500); """, 1 + group_commit_insert """ insert into ${dbTableName}(name, id, score, __DORIS_SEQUENCE_COL__) values('c', 3, 30, 300); """, 1 + group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__) values(30, 2, 'b', 200); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__, __DORIS_SEQUENCE_COL__) values(1, 'a', 200, 1, 200) """, 1 + group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__) values(30, 2, 'b', 100, 1); """, 1 + + /*getRowCount(4) qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ + }; + + // 2. 
stream load + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'group_commit', 'async_mode' + set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__' + set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' + file "test_group_commit_3.csv" + unset 'label' + + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 4, 4, 0, 0) + } + } + /*getRowCount(9) + qt_sql """ select * from ${dbTableName} order by id, name, score asc; """*/ - streamLoad { - table "${tableName}" + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'group_commit', 'async_mode' - set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__' - set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' - file "test_group_commit_4.csv" - unset 'label' + set 'column_separator', ',' + set 'group_commit', 'async_mode' + set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__' + set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' + file "test_group_commit_4.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 7, 7, 0, 0) - } + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 7, 7, 0, 0) } - getRowCount(10) - sql """ set show_hidden_columns = true """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - sql """ set show_hidden_columns = false """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - } finally { - // try_sql("DROP TABLE ${dbTableName}") - sql """ set show_hidden_columns = false """ } + getRowCount(10) + sql """ set show_hidden_columns = true """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + sql """ set show_hidden_columns = false """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + } finally { + // try_sql("DROP TABLE ${dbTableName}") + sql """ set show_hidden_columns = false """ } } diff --git a/regression-test/suites/insert_p0/insert_group_commit_into_unique_sync_mode.groovy b/regression-test/suites/insert_p0/insert_group_commit_into_unique_sync_mode.groovy index f58b306ab4ef66..50586c829345ae 100644 --- a/regression-test/suites/insert_p0/insert_group_commit_into_unique_sync_mode.groovy +++ b/regression-test/suites/insert_p0/insert_group_commit_into_unique_sync_mode.groovy @@ -106,15 +106,14 @@ suite("insert_group_commit_into_unique_sync_mode") { } } - for (item in ["legacy", "nereids"]) { - // 1. table without sequence column - try { - tableName = "insert_group_commit_into_unique_s_" + "1_" + item - dbTableName = dbName + "." + tableName - // create table - sql """ drop table if exists ${dbTableName}; """ - - sql """ + // 1. table without sequence column + try { + tableName = "insert_group_commit_into_unique_s_1" + dbTableName = dbName + "." + tableName + // create table + sql """ drop table if exists ${dbTableName}; """ + + sql """ CREATE TABLE ${dbTableName} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -128,79 +127,73 @@ suite("insert_group_commit_into_unique_sync_mode") { ); """ - // 1. 
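
The unique-key group-commit suites above exercise the feature through plain JDBC: the `group_commit` session variable selects the mode, and the hidden `__DORIS_SEQUENCE_COL__` / `__DORIS_DELETE_SIGN__` columns are written explicitly. A minimal standalone sketch of that flow, assuming a reachable FE at 127.0.0.1:9030, a UNIQUE KEY table `test_db.t(id, name, score)` created with `"function_column.sequence_type" = "int"`, and the MySQL JDBC driver on the classpath (all of these are placeholders, not values from the patch):

```groovy
import java.sql.DriverManager

// Connection details are assumptions; the regression suites take them from context.config.
def conn = DriverManager.getConnection("jdbc:mysql://127.0.0.1:9030/test_db", "root", "")
def stmt = conn.createStatement()

// Group commit is a per-session setting; async_mode returns before the data is visible.
stmt.execute("set group_commit = async_mode")

// On a UNIQUE KEY table with a sequence column the hidden columns can be written directly:
// __DORIS_SEQUENCE_COL__ decides which version of a key wins,
// __DORIS_DELETE_SIGN__ = 1 marks the row as a delete.
stmt.execute("insert into t(id, name, score, __DORIS_SEQUENCE_COL__) values (1, 'a', 10, 100)")
stmt.execute("insert into t(id, name, score, __DORIS_DELETE_SIGN__, __DORIS_SEQUENCE_COL__) values (1, 'a', 200, 1, 200)")

// Because async_mode batches writes on the server, visibility lags the insert,
// which is why the suites poll getRowCount() instead of asserting row counts immediately.
conn.close()
```
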
insert into - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = sync_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 - group_commit_insert """ insert into ${dbTableName}(id) values(6); """, 1 - group_commit_insert """ insert into ${dbTableName}(id) values(4); """, 1 - group_commit_insert """ insert into ${dbTableName}(name, id) values('c', 3); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name) values(2, 'b'); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 - - getRowCount(5) - // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ - } + // 1. insert into + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = sync_mode; """ + + group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 + group_commit_insert """ insert into ${dbTableName}(id) values(6); """, 1 + group_commit_insert """ insert into ${dbTableName}(id) values(4); """, 1 + group_commit_insert """ insert into ${dbTableName}(name, id) values('c', 3); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name) values(2, 'b'); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 - // 2. stream load - streamLoad { - table "${tableName}" + getRowCount(5) + // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ + } - set 'column_separator', ',' - set 'group_commit', 'sync_mode' - set 'columns', 'id, name, score' - file "test_group_commit_1.csv" - unset 'label' + // 2. 
stream load + streamLoad { + table "${tableName}" - time 10000 // limit inflight 10s + set 'column_separator', ',' + set 'group_commit', 'sync_mode' + set 'columns', 'id, name, score' + file "test_group_commit_1.csv" + unset 'label' - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 4, 4, 0, 0) - } + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 4, 4, 0, 0) } - getRowCount(9) - // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ + } + getRowCount(9) + // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ - streamLoad { - set 'version', '1' - set 'sql', """ + streamLoad { + set 'version', '1' + set 'sql', """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) select * from http_stream ("format"="csv", "column_separator"=",") """ - set 'group_commit', 'sync_mode' - file "test_group_commit_2.csv" - unset 'label' + set 'group_commit', 'sync_mode' + file "test_group_commit_2.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 5, 5, 0, 0) - } + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 5, 5, 0, 0) } - getRowCount(12) - sql """ set show_hidden_columns = true """ - qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - sql """ set show_hidden_columns = false """ - qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - } finally { - // try_sql("DROP TABLE ${dbTableName}") } + getRowCount(12) + sql """ set show_hidden_columns = true """ + qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + sql """ set show_hidden_columns = false """ + qt_sql """ select id, name, score, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + } finally { + // try_sql("DROP TABLE ${dbTableName}") + } - // 2. table with "function_column.sequence_col" - try { - tableName = "insert_group_commit_into_unique_s_" + "2_" + item - dbTableName = dbName + "." + tableName - // create table - sql """ drop table if exists ${dbTableName}; """ + // 2. table with "function_column.sequence_col" + try { + tableName = "insert_group_commit_into_unique_s_2" + dbTableName = dbName + "." + tableName + // create table + sql """ drop table if exists ${dbTableName}; """ - sql """ + sql """ CREATE TABLE ${dbTableName} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -215,82 +208,76 @@ suite("insert_group_commit_into_unique_sync_mode") { ); """ - // 1. 
insert into - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = sync_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 - group_commit_insert """ insert into ${dbTableName}(id, score) values(6, 60); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, score) values(4, 70); """, 1 - group_commit_insert """ insert into ${dbTableName}(name, id, score) values('c', 3, 30); """, 1 - sql """ set group_commit = OFF_MODE; """ - off_mode_group_commit_insert """ insert into ${dbTableName}(score, id, name) values(30, 2, 'b'); """, 1 - sql """ set group_commit = sync_mode; """ - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 - - getRowCount(5) - // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ - }; - - // 2. stream load - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'group_commit', 'SYNC_mode' - set 'columns', 'id, name, score' - file "test_group_commit_1.csv" - unset 'label' - - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 4, 4, 0, 0) - } - } - getRowCount(9) + // 1. insert into + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = sync_mode; """ + + group_commit_insert """ insert into ${dbTableName} values (1, 'a', 10),(5, 'q', 50); """, 2 + group_commit_insert """ insert into ${dbTableName}(id, score) values(6, 60); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, score) values(4, 70); """, 1 + group_commit_insert """ insert into ${dbTableName}(name, id, score) values('c', 3, 30); """, 1 + sql """ set group_commit = OFF_MODE; """ + off_mode_group_commit_insert """ insert into ${dbTableName}(score, id, name) values(30, 2, 'b'); """, 1 + sql """ set group_commit = sync_mode; """ + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__) values(1, 'a', 10, 1) """, 1 + + getRowCount(5) // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ + }; + + // 2. 
stream load + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'group_commit', 'SYNC_mode' + set 'columns', 'id, name, score' + file "test_group_commit_1.csv" + unset 'label' + + time 10000 // limit inflight 10s - streamLoad { - set 'version', '1' - set 'sql', """ + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 4, 4, 0, 0) + } + } + getRowCount(9) + // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ + + streamLoad { + set 'version', '1' + set 'sql', """ insert into ${dbTableName} (id, name, score, __DORIS_DELETE_SIGN__) select * from http_stream ("format"="csv", "column_separator"=",") """ - set 'group_commit', 'off_mode' - file "test_group_commit_2.csv" - unset 'label' + set 'group_commit', 'off_mode' + file "test_group_commit_2.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkOffModeStreamLoadResult(exception, result, 5, 5, 0, 0) - } + check { result, exception, startTime, endTime -> + checkOffModeStreamLoadResult(exception, result, 5, 5, 0, 0) } - getRowCount(12) - sql """ set show_hidden_columns = true """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - sql """ set show_hidden_columns = false """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - } finally { - // try_sql("DROP TABLE ${dbTableName}") - sql """ set show_hidden_columns = false """ } + getRowCount(12) + sql """ set show_hidden_columns = true """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + sql """ set show_hidden_columns = false """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + } finally { + // try_sql("DROP TABLE ${dbTableName}") + sql """ set show_hidden_columns = false """ + } - // 3. table with "function_column.sequence_type" - try { - tableName = "insert_group_commit_into_unique_s_" + "3_" + item - dbTableName = dbName + "." + tableName - // create table - sql """ drop table if exists ${dbTableName}; """ + // 3. table with "function_column.sequence_type" + try { + tableName = "insert_group_commit_into_unique_s_3" + dbTableName = dbName + "." + tableName + // create table + sql """ drop table if exists ${dbTableName}; """ - sql """ + sql """ CREATE TABLE ${dbTableName} ( `id` int(11) NOT NULL, `name` varchar(50) NULL, @@ -305,72 +292,65 @@ suite("insert_group_commit_into_unique_sync_mode") { ); """ - // 1. 
insert into - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = sync_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_SEQUENCE_COL__) values (1, 'a', 10, 100),(5, 'q', 50, 500); """, 2 - group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 60, 600); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 50, 500); """, 1 - group_commit_insert """ insert into ${dbTableName}(name, id, score, __DORIS_SEQUENCE_COL__) values('c', 3, 30, 300); """, 1 - group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__) values(30, 2, 'b', 200); """, 1 - group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__, __DORIS_SEQUENCE_COL__) values(1, 'a', 200, 1, 200) """, 1 - group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__) values(30, 2, 'b', 100, 1); """, 1 - - getRowCount(4) - // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ - }; - - // 2. stream load - streamLoad { - table "${tableName}" - - set 'column_separator', ',' - set 'group_commit', 'SYNC_MODE' - set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__' - set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' - file "test_group_commit_3.csv" - unset 'label' - - time 10000 // limit inflight 10s - - check { result, exception, startTime, endTime -> - checkStreamLoadResult(exception, result, 4, 4, 0, 0) - } - } - getRowCount(8) + // 1. insert into + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = sync_mode; """ + + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_SEQUENCE_COL__) values (1, 'a', 10, 100),(5, 'q', 50, 500); """, 2 + group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 60, 600); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, score, __DORIS_SEQUENCE_COL__) values(6, 50, 500); """, 1 + group_commit_insert """ insert into ${dbTableName}(name, id, score, __DORIS_SEQUENCE_COL__) values('c', 3, 30, 300); """, 1 + group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__) values(30, 2, 'b', 200); """, 1 + group_commit_insert """ insert into ${dbTableName}(id, name, score, __DORIS_DELETE_SIGN__, __DORIS_SEQUENCE_COL__) values(1, 'a', 200, 1, 200) """, 1 + group_commit_insert """ insert into ${dbTableName}(score, id, name, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__) values(30, 2, 'b', 100, 1); """, 1 + + getRowCount(4) // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ + }; + + // 2. 
stream load + streamLoad { + table "${tableName}" + + set 'column_separator', ',' + set 'group_commit', 'SYNC_MODE' + set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__' + set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' + file "test_group_commit_3.csv" + unset 'label' + + time 10000 // limit inflight 10s + + check { result, exception, startTime, endTime -> + checkStreamLoadResult(exception, result, 4, 4, 0, 0) + } + } + getRowCount(8) + // qt_sql """ select * from ${dbTableName} order by id, name, score asc; """ - streamLoad { - table "${tableName}" + streamLoad { + table "${tableName}" - set 'column_separator', ',' - set 'group_commit', 'OFF_mode' - set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__' - set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' - file "test_group_commit_4.csv" - unset 'label' + set 'column_separator', ',' + set 'group_commit', 'OFF_mode' + set 'columns', 'id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__' + set 'function_column.sequence_col', '__DORIS_SEQUENCE_COL__' + file "test_group_commit_4.csv" + unset 'label' - time 10000 // limit inflight 10s + time 10000 // limit inflight 10s - check { result, exception, startTime, endTime -> - checkOffModeStreamLoadResult(exception, result, 7, 7, 0, 0) - } + check { result, exception, startTime, endTime -> + checkOffModeStreamLoadResult(exception, result, 7, 7, 0, 0) } - getRowCount(10) - sql """ set show_hidden_columns = true """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - sql """ set show_hidden_columns = false """ - qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ - } finally { - // try_sql("DROP TABLE ${dbTableName}") - sql """ set show_hidden_columns = false """ } + getRowCount(10) + sql """ set show_hidden_columns = true """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + sql """ set show_hidden_columns = false """ + qt_sql """ select id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__ from ${dbTableName} order by id, name, score asc; """ + } finally { + // try_sql("DROP TABLE ${dbTableName}") + sql """ set show_hidden_columns = false """ } } diff --git a/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy b/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy index f59c9bb8b00c69..e207e71f2cc964 100644 --- a/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy +++ b/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy @@ -43,12 +43,11 @@ suite("insert_group_commit_with_exception") { return true } - for (item in ["legacy", "nereids"]) { - try { - // create table - sql """ drop table if exists ${table}; """ + try { + // create table + sql """ drop table if exists ${table}; """ - sql """ + sql """ CREATE TABLE `${table}` ( `id` int(11) NOT NULL, `name` varchar(1100) NULL, @@ -62,234 +61,214 @@ suite("insert_group_commit_with_exception") { ); """ - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - sql "set enable_server_side_prepared_statement = true" - } else { - sql """ set enable_nereids_planner = false; """ - sql "set enable_server_side_prepared_statement 
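
The streamLoad blocks in these suites drive the same path over HTTP: the group commit mode, the column list, and the sequence-column mapping all travel as stream load headers. A rough standalone equivalent, shelling out to curl from Groovy; the host, credentials, database/table names, and CSV file are assumptions, not values from the patch:

```groovy
// Approximates what the suites' streamLoad { ... } DSL sends for the sequence-column cases.
def cmd = [
    "curl", "-sS", "--location-trusted",
    "-u", "root:",
    "-H", "Expect: 100-continue",
    "-H", "column_separator: ,",
    "-H", "group_commit: sync_mode",
    "-H", "columns: id, name, score, __DORIS_SEQUENCE_COL__, __DORIS_DELETE_SIGN__",
    "-H", "function_column.sequence_col: __DORIS_SEQUENCE_COL__",
    "-T", "test_group_commit_4.csv",
    "http://127.0.0.1:8030/api/test_db/t/_stream_load"
]
def proc = cmd.execute()
proc.waitFor()
// The JSON response carries Status / NumberTotalRows / NumberLoadedRows,
// which is what checkStreamLoadResult() inspects in the suites.
println proc.in.text
```
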
= false" - } + sql """ set group_commit = async_mode; """ + sql "set enable_server_side_prepared_statement = true" + // insert into without column + try { + def result = sql """ insert into ${table} values(1, 'a', 10, 100) """ + assertTrue(false) + } catch (Exception e) { + assertTrue(e.getMessage().contains("Column count doesn't match value count")) + } - // insert into without column - try { - def result = sql """ insert into ${table} values(1, 'a', 10, 100) """ + try { + def result = sql """ insert into ${table} values(2, 'b') """ + assertTrue(false) + } catch (Exception e) { + assertTrue(e.getMessage().contains("Column count doesn't match value count")) + } + + result = sql """ insert into ${table} values(3, 'c', 30) """ + logger.info("insert result: " + result) + + // insert into with column + result = sql """ insert into ${table}(id, name) values(4, 'd') """ + logger.info("insert result: " + result) + + getRowCount(2) + + try { + result = sql """ insert into ${table}(id, name) values(5, 'd', 50) """ + assertTrue(false) + } catch (Exception e) { + assertTrue(e.getMessage().contains("Column count doesn't match value count")) + } + + try { + result = sql """ insert into ${table}(id, name) values(6) """ + assertTrue(false) + } catch (Exception e) { + assertTrue(e.getMessage().contains("Column count doesn't match value count")) + } + + try { + result = sql """ insert into ${table}(id, names) values(7, 'd') """ + assertTrue(false) + } catch (Exception e) { + assertTrue(e.getMessage().contains("Unknown column 'names'")) + } + + + // prepare insert + def db = context.config.defaultDb + "_insert_p0" + String url = getServerPrepareJdbcUrl(context.config.jdbcUrl, db) + + try (Connection connection = DriverManager.getConnection(url, context.config.jdbcUser, context.config.jdbcPassword)) { + Statement statement = connection.createStatement(); + statement.execute("use ${db}"); + statement.execute("set group_commit = eventual_consistency;"); + statement.execute("set enable_server_side_prepared_statement = true") + // without column + try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?, ?)")) { + ps.setObject(1, 8); + ps.setObject(2, "f"); + ps.setObject(3, 70); + ps.setObject(4, "a"); + ps.addBatch(); + int[] result = ps.executeBatch(); assertTrue(false) } catch (Exception e) { assertTrue(e.getMessage().contains("Column count doesn't match value count")) } - try { - def result = sql """ insert into ${table} values(2, 'b') """ + try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?)")) { + ps.setObject(1, 9); + ps.setObject(2, "f"); + ps.addBatch(); + int[] result = ps.executeBatch(); assertTrue(false) } catch (Exception e) { assertTrue(e.getMessage().contains("Column count doesn't match value count")) } - result = sql """ insert into ${table} values(3, 'c', 30) """ - logger.info("insert result: " + result) - - // insert into with column - result = sql """ insert into ${table}(id, name) values(4, 'd') """ - logger.info("insert result: " + result) + try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?)")) { + ps.setObject(1, 10); + ps.setObject(2, "f"); + ps.setObject(3, 90); + ps.addBatch(); + int[] result = ps.executeBatch(); + logger.info("prepare insert result: " + result) + } - getRowCount(2) + // with columns + try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name) values(?, ?)")) { + ps.setObject(1, 11); + ps.setObject(2, "f"); + ps.addBatch(); + 
int[] result = ps.executeBatch(); + logger.info("prepare insert result: " + result) + } - try { - result = sql """ insert into ${table}(id, name) values(5, 'd', 50) """ + try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name) values(?, ?, ?)")) { + ps.setObject(1, 12); + ps.setObject(2, "f"); + ps.setObject(3, "f"); + ps.addBatch(); + int[] result = ps.executeBatch(); assertTrue(false) } catch (Exception e) { assertTrue(e.getMessage().contains("Column count doesn't match value count")) } - try { - result = sql """ insert into ${table}(id, name) values(6) """ + try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name) values(?)")) { + ps.setObject(1, 13); + ps.addBatch(); + int[] result = ps.executeBatch(); assertTrue(false) } catch (Exception e) { assertTrue(e.getMessage().contains("Column count doesn't match value count")) } - try { - result = sql """ insert into ${table}(id, names) values(7, 'd') """ + try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, names) values(?, ?)")) { + ps.setObject(1, 12); + ps.setObject(2, "f"); + ps.addBatch(); + int[] result = ps.executeBatch(); assertTrue(false) } catch (Exception e) { assertTrue(e.getMessage().contains("Unknown column 'names'")) } + getRowCount(4) - // prepare insert - def db = context.config.defaultDb + "_insert_p0" - String url = getServerPrepareJdbcUrl(context.config.jdbcUrl, db) - - try (Connection connection = DriverManager.getConnection(url, context.config.jdbcUser, context.config.jdbcPassword)) { - Statement statement = connection.createStatement(); - statement.execute("use ${db}"); - statement.execute("set group_commit = eventual_consistency;"); - if (item == "nereids") { - statement.execute("set enable_nereids_planner=true;"); - statement.execute("set enable_fallback_to_original_planner=false;"); - sql "set enable_server_side_prepared_statement = true" - } else { - statement.execute("set enable_nereids_planner = false;") - sql "set enable_server_side_prepared_statement = false" - } - // without column - try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?, ?)")) { - ps.setObject(1, 8); - ps.setObject(2, "f"); - ps.setObject(3, 70); - ps.setObject(4, "a"); - ps.addBatch(); - int[] result = ps.executeBatch(); - assertTrue(false) - } catch (Exception e) { - assertTrue(e.getMessage().contains("Column count doesn't match value count")) - } - - try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?)")) { - ps.setObject(1, 9); - ps.setObject(2, "f"); - ps.addBatch(); - int[] result = ps.executeBatch(); - assertTrue(false) - } catch (Exception e) { - assertTrue(e.getMessage().contains("Column count doesn't match value count")) - } - - try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?)")) { - ps.setObject(1, 10); + // prepare insert with multi rows + try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?)")) { + for (int i = 0; i < 5; i++) { + ps.setObject(1, 13 + i); ps.setObject(2, "f"); ps.setObject(3, 90); ps.addBatch(); int[] result = ps.executeBatch(); logger.info("prepare insert result: " + result) } + } + getRowCount(9) - // with columns - try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name) values(?, ?)")) { - ps.setObject(1, 11); - ps.setObject(2, "f"); - ps.addBatch(); - int[] result = ps.executeBatch(); - logger.info("prepare insert result: " + 
result) - } - - try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name) values(?, ?, ?)")) { - ps.setObject(1, 12); - ps.setObject(2, "f"); - ps.setObject(3, "f"); - ps.addBatch(); - int[] result = ps.executeBatch(); - assertTrue(false) - } catch (Exception e) { - assertTrue(e.getMessage().contains("Column count doesn't match value count")) - } - - try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name) values(?)")) { - ps.setObject(1, 13); - ps.addBatch(); - int[] result = ps.executeBatch(); - assertTrue(false) - } catch (Exception e) { - assertTrue(e.getMessage().contains("Column count doesn't match value count")) - } - - try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, names) values(?, ?)")) { - ps.setObject(1, 12); + // prepare insert with multi rows + try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?),(?, ?, ?)")) { + for (int i = 0; i < 2; i++) { + ps.setObject(1, 18 + i); ps.setObject(2, "f"); + ps.setObject(3, 90); + ps.setObject(4, 18 + i + 1); + ps.setObject(5, "f"); + ps.setObject(6, 90); ps.addBatch(); int[] result = ps.executeBatch(); - assertTrue(false) - } catch (Exception e) { - assertTrue(e.getMessage().contains("Unknown column 'names'")) - } - - getRowCount(4) - - // prepare insert with multi rows - try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?)")) { - for (int i = 0; i < 5; i++) { - ps.setObject(1, 13 + i); - ps.setObject(2, "f"); - ps.setObject(3, 90); - ps.addBatch(); - int[] result = ps.executeBatch(); - logger.info("prepare insert result: " + result) - } - } - getRowCount(9) - - // prepare insert with multi rows - try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?),(?, ?, ?)")) { - for (int i = 0; i < 2; i++) { - ps.setObject(1, 18 + i); - ps.setObject(2, "f"); - ps.setObject(3, 90); - ps.setObject(4, 18 + i + 1); - ps.setObject(5, "f"); - ps.setObject(6, 90); - ps.addBatch(); - int[] result = ps.executeBatch(); - logger.info("prepare insert result: " + result) - } - } - getRowCount(13) - - // prepare insert without column names, and do schema change - try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?)")) { - ps.setObject(1, 22) - ps.setObject(2, "f") - ps.setObject(3, 90) - ps.addBatch() - int[] result = ps.executeBatch() logger.info("prepare insert result: " + result) - - sql """ alter table ${table} ADD column age int after name; """ - assertTrue(getAlterTableState(), "add column should success") - - try { - ps.setObject(1, 23) - ps.setObject(2, "f") - ps.setObject(3, 90) - ps.addBatch() - result = ps.executeBatch() - assertTrue(false) - } catch (Exception e) { - logger.info("exception : " + e) - if (item == "legacy") { - assertTrue(e.getMessage().contains("Column count doesn't match value count")) - } - if (item == "nereids") { - assertTrue(e.getMessage().contains("insert into cols should be corresponding to the query output")) - } - } } - getRowCount(14) - - // prepare insert with column names, and do schema change - try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name, score) values(?, ?, ?)")) { - ps.setObject(1, 24) - ps.setObject(2, "f") - ps.setObject(3, 90) - ps.addBatch() - int[] result = ps.executeBatch() - logger.info("prepare insert result: " + result) - - sql """ alter table ${table} DROP column age; """ - assertTrue(getAlterTableState(), "drop column 
should success") - - ps.setObject(1, 25) + } + getRowCount(13) + + // prepare insert without column names, and do schema change + try (PreparedStatement ps = connection.prepareStatement("insert into ${table} values(?, ?, ?)")) { + ps.setObject(1, 22) + ps.setObject(2, "f") + ps.setObject(3, 90) + ps.addBatch() + int[] result = ps.executeBatch() + logger.info("prepare insert result: " + result) + + sql """ alter table ${table} ADD column age int after name; """ + assertTrue(getAlterTableState(), "add column should success") + + try { + ps.setObject(1, 23) ps.setObject(2, "f") ps.setObject(3, 90) ps.addBatch() result = ps.executeBatch() - logger.info("prepare insert result: " + result) + assertTrue(false) + } catch (Exception e) { + logger.info("exception : " + e) + assertTrue(e.getMessage().contains("insert into cols should be corresponding to the query output")) } - getRowCount(16) } - } finally { - // try_sql("DROP TABLE ${table}") + getRowCount(14) + + // prepare insert with column names, and do schema change + try (PreparedStatement ps = connection.prepareStatement("insert into ${table}(id, name, score) values(?, ?, ?)")) { + ps.setObject(1, 24) + ps.setObject(2, "f") + ps.setObject(3, 90) + ps.addBatch() + int[] result = ps.executeBatch() + logger.info("prepare insert result: " + result) + + sql """ alter table ${table} DROP column age; """ + assertTrue(getAlterTableState(), "drop column should success") + + ps.setObject(1, 25) + ps.setObject(2, "f") + ps.setObject(3, 90) + ps.addBatch() + result = ps.executeBatch() + logger.info("prepare insert result: " + result) + } + getRowCount(16) } + } finally { + // try_sql("DROP TABLE ${table}") } + } diff --git a/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy b/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy index b66130c9e29627..989446fd097656 100644 --- a/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy +++ b/regression-test/suites/insert_p0/insert_group_commit_with_large_data.groovy @@ -48,12 +48,11 @@ suite("insert_group_commit_with_large_data") { assertTrue(serverInfo.contains("'label':'group_commit_")) } - for (item in ["legacy", "nereids"]) { - try { - // create table - sql """ drop table if exists ${table}; """ + try { + // create table + sql """ drop table if exists ${table}; """ - sql """ + sql """ CREATE TABLE `${table}` ( `id` int(11) NOT NULL, `name` varchar(1100) NULL, @@ -67,40 +66,33 @@ suite("insert_group_commit_with_large_data") { ); """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - sql """ use ${db}; """ + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql """ set group_commit = async_mode; """ + sql """ use ${db}; """ - // insert into 5000 rows - def insert_sql = """ insert into ${table} values(1, 'a', 10) """ - for (def i in 2..5000) { - insert_sql += """, (${i}, 'a', 10) """ - } - group_commit_insert insert_sql, 5000 - getRowCount(5000) + // insert into 5000 rows + def insert_sql = """ insert into ${table} values(1, 'a', 10) """ + for (def i in 2..5000) { + insert_sql += """, (${i}, 'a', 10) """ + } + group_commit_insert insert_sql, 5000 + getRowCount(5000) - // 
data size is large than 4MB, need " set global max_allowed_packet = 5508950 " - /*def name_value = "" - for (def i in 0..1024) { - name_value += 'a' - } - insert_sql = """ insert into ${table} values(1, '${name_value}', 10) """ - for (def i in 2..5000) { - insert_sql += """, (${i}, '${name_value}', 10) """ - } - result = sql """ ${insert_sql} """ - group_commit_insert insert_sql, 5000 - getRowCount(10000) - */ + // data size is large than 4MB, need " set global max_allowed_packet = 5508950 " + /*def name_value = "" + for (def i in 0..1024) { + name_value += 'a' + } + insert_sql = """ insert into ${table} values(1, '${name_value}', 10) """ + for (def i in 2..5000) { + insert_sql += """, (${i}, '${name_value}', 10) """ } - } finally { - // try_sql("DROP TABLE ${table}") + result = sql """ ${insert_sql} """ + group_commit_insert insert_sql, 5000 + getRowCount(10000) + */ } + } finally { + // try_sql("DROP TABLE ${table}") } } diff --git a/regression-test/suites/insert_p0/insert_with_null.groovy b/regression-test/suites/insert_p0/insert_with_null.groovy index e1c9fd92477a5e..19e49a749cf127 100644 --- a/regression-test/suites/insert_p0/insert_with_null.groovy +++ b/regression-test/suites/insert_p0/insert_with_null.groovy @@ -48,7 +48,7 @@ suite("insert_with_null") { } } - def write_modes = ["insert", "txn_insert", "group_commit_legacy", "group_commit_nereids"] + def write_modes = ["insert", "txn_insert", "group_commit"] for (def write_mode : write_modes) { sql """ DROP TABLE IF EXISTS ${table} """ @@ -66,13 +66,8 @@ suite("insert_with_null") { """ if (write_mode == "txn_insert") { sql "begin" - } else if (write_mode == "group_commit_legacy") { + } else if (write_mode == "group_commit") { sql """ set group_commit = async_mode; """ - sql """ set enable_nereids_planner = false; """ - } else if (write_mode == "group_commit_nereids") { - sql """ set group_commit = async_mode; """ - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ } sql """ insert into ${table} values(1, '"b"', ["k1=v1, k2=v2"]); """ diff --git a/regression-test/suites/insert_p0/test_group_commit_data_bytes_property.groovy b/regression-test/suites/insert_p0/test_group_commit_data_bytes_property.groovy index a19e545d43a77c..2fc08b68fb1edc 100644 --- a/regression-test/suites/insert_p0/test_group_commit_data_bytes_property.groovy +++ b/regression-test/suites/insert_p0/test_group_commit_data_bytes_property.groovy @@ -43,12 +43,10 @@ suite("test_group_commit_data_bytes_property") { } - - for (item in ["legacy", "nereids"]) { - try { - def test_table = table + "_" + item; - sql """ drop table if exists ${test_table} force; """ - sql """ + try { + def test_table = table + sql """ drop table if exists ${test_table} force; """ + sql """ CREATE table ${test_table} ( k bigint, v bigint @@ -61,17 +59,10 @@ suite("test_group_commit_data_bytes_property") { ); """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - def res1 = sql """show create table ${test_table}""" assertTrue(res1.toString().contains("\"group_commit_data_bytes\" = \"1024\"")) @@ -79,7 +70,7 @@ 
suite("test_group_commit_data_bytes_property") { def msg2 = group_commit_insert """insert into ${test_table} values(2,2) """, 1 - assertEquals(msg1.substring(msg1.indexOf("group_commit")+11, msg1.indexOf("group_commit")+43), msg2.substring(msg2.indexOf("group_commit")+11, msg2.indexOf("group_commit")+43)); + assertEquals(msg1.substring(msg1.indexOf("group_commit") + 11, msg1.indexOf("group_commit") + 43), msg2.substring(msg2.indexOf("group_commit") + 11, msg2.indexOf("group_commit") + 43)); sql "ALTER table ${test_table} SET (\"group_commit_data_bytes\"=\"1\"); " @@ -93,27 +84,26 @@ suite("test_group_commit_data_bytes_property") { // add a retry for can not get a block queue because the data bytes is too small def msg4 = "" Awaitility.await().atMost(10, SECONDS).until( - { - try { - sql """ set group_commit = async_mode; """ - msg4 = group_commit_insert """insert into ${test_table} values(4,4); """, 1 - return true - } catch (Exception e) { - logger.info("get exception: ${e.getMessage()}") - if (e.getMessage().contains("can not get a block queue")) { - return false - } else { - throw e + { + try { + sql """ set group_commit = async_mode; """ + msg4 = group_commit_insert """insert into ${test_table} values(4,4); """, 1 + return true + } catch (Exception e) { + logger.info("get exception: ${e.getMessage()}") + if (e.getMessage().contains("can not get a block queue")) { + return false + } else { + throw e + } } } - } ) - assertNotEquals(msg3.substring(msg3.indexOf("group_commit")+11, msg3.indexOf("group_commit")+43), msg4.substring(msg4.indexOf("group_commit")+11, msg4.indexOf("group_commit")+43)); + assertNotEquals(msg3.substring(msg3.indexOf("group_commit") + 11, msg3.indexOf("group_commit") + 43), msg4.substring(msg4.indexOf("group_commit") + 11, msg4.indexOf("group_commit") + 43)); - } - } finally { - // try_sql("DROP TABLE ${table}") } + } finally { + // try_sql("DROP TABLE ${table}") } } diff --git a/regression-test/suites/insert_p0/test_group_commit_interval_ms_property.groovy b/regression-test/suites/insert_p0/test_group_commit_interval_ms_property.groovy index 052ecc3d98e0c3..688c8575366374 100644 --- a/regression-test/suites/insert_p0/test_group_commit_interval_ms_property.groovy +++ b/regression-test/suites/insert_p0/test_group_commit_interval_ms_property.groovy @@ -40,12 +40,10 @@ suite("test_group_commit_interval_ms_property") { } - - for (item in ["legacy", "nereids"]) { - try { - test_table = table + "_" + item; - sql """ drop table if exists ${test_table} force; """ - sql """ + try { + test_table = table + sql """ drop table if exists ${test_table} force; """ + sql """ CREATE table ${test_table} ( k bigint, v bigint @@ -58,17 +56,10 @@ suite("test_group_commit_interval_ms_property") { ); """ - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { sql """ set group_commit = async_mode; """ - if (item == "nereids") { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - def res1 = sql """show create table ${test_table}""" assertTrue(res1.toString().contains("\"group_commit_interval_ms\" = \"10000\"")) @@ -78,7 +69,7 @@ suite("test_group_commit_interval_ms_property") { def msg2 = group_commit_insert """insert into ${test_table} values(2,2) """, 1 - 
assertEquals(msg1.substring(msg1.indexOf("group_commit")+11, msg1.indexOf("group_commit")+43), msg2.substring(msg2.indexOf("group_commit")+11, msg2.indexOf("group_commit")+43)); + assertEquals(msg1.substring(msg1.indexOf("group_commit") + 11, msg1.indexOf("group_commit") + 43), msg2.substring(msg2.indexOf("group_commit") + 11, msg2.indexOf("group_commit") + 43)); sql "ALTER table ${test_table} SET (\"group_commit_interval_ms\"=\"1000\"); " @@ -91,11 +82,10 @@ suite("test_group_commit_interval_ms_property") { def msg4 = group_commit_insert """insert into ${test_table} values(4,4); """, 1 - assertNotEquals(msg3.substring(msg3.indexOf("group_commit")+11, msg3.indexOf("group_commit")+43), msg4.substring(msg4.indexOf("group_commit")+11, msg4.indexOf("group_commit")+43)); + assertNotEquals(msg3.substring(msg3.indexOf("group_commit") + 11, msg3.indexOf("group_commit") + 43), msg4.substring(msg4.indexOf("group_commit") + 11, msg4.indexOf("group_commit") + 43)); - } - } finally { - // try_sql("DROP TABLE ${table}") } + } finally { + // try_sql("DROP TABLE ${table}") } } diff --git a/regression-test/suites/insert_p0/test_insert_partition_fail_url.groovy b/regression-test/suites/insert_p0/test_insert_partition_fail_url.groovy index 9b0a7f3a1a7782..b723cc35da1107 100644 --- a/regression-test/suites/insert_p0/test_insert_partition_fail_url.groovy +++ b/regression-test/suites/insert_p0/test_insert_partition_fail_url.groovy @@ -73,11 +73,18 @@ suite("test_insert_partition_fail_url") { INSERT INTO ${srcName} SELECT * FROM ${srcName}; """ + // The error message may vary due to variations in fuzzy execution instance number or batch size. + // like this: + // Insert has filtered data in strict mode. url: http://172.16.0.10:8041/api/_load_error_log? + // file=__shard_303/error_log_insert_stmt_a1ccfb9c67ba40f5-900d0db1d06a19dd_a1ccfb9c67ba40f5_900d0db1d06a19dd + // or like this: + // [DATA_QUALITY_ERROR]Encountered unqualified data, stop processing. url: http://172.16.0.10:8041/api/_load_error_log? + // file=__shard_303/error_log_insert_stmt_a1ccfb9c67ba40f5-900d0db1d06a19dd_a1ccfb9c67ba40f5_900d0db1d06a19dd expectExceptionLike({ sql """ INSERT INTO ${dstName} SELECT `id`, `score` FROM ${srcName}; """ - }, "Insert has filtered data in strict mode. url: ") + }, "error_log") sql """ INSERT INTO ${srcName} SELECT * FROM ${srcName}; @@ -87,5 +94,5 @@ suite("test_insert_partition_fail_url") { sql """ INSERT INTO ${dstName} SELECT `id`, `score` FROM ${srcName}; """ - }, "[DATA_QUALITY_ERROR]Encountered unqualified data, stop processing. url: ") + }, "error_log") } diff --git a/regression-test/suites/insert_p0/test_insert_strict_fail_url.groovy b/regression-test/suites/insert_p0/test_insert_strict_fail_url.groovy index 8d95a423cddc25..b303d95fa91ae6 100644 --- a/regression-test/suites/insert_p0/test_insert_strict_fail_url.groovy +++ b/regression-test/suites/insert_p0/test_insert_strict_fail_url.groovy @@ -69,11 +69,18 @@ suite("test_insert_strict_fail_url") { INSERT INTO ${srcName} SELECT * FROM ${srcName}; """ + // The error message may vary due to variations in fuzzy execution instance number or batch size. + // like this: + // Insert has filtered data in strict mode. url: http://172.16.0.10:8041/api/_load_error_log? + // file=__shard_303/error_log_insert_stmt_a1ccfb9c67ba40f5-900d0db1d06a19dd_a1ccfb9c67ba40f5_900d0db1d06a19dd + // or like this: + // [DATA_QUALITY_ERROR]Encountered unqualified data, stop processing. url: http://172.16.0.10:8041/api/_load_error_log? 
+ // file=__shard_303/error_log_insert_stmt_a1ccfb9c67ba40f5-900d0db1d06a19dd_a1ccfb9c67ba40f5_900d0db1d06a19dd expectExceptionLike({ sql """ INSERT INTO ${dstName} SELECT `id`, `score` FROM ${srcName}; """ - }, "Insert has filtered data in strict mode. url: ") + }, "error_log") sql """ INSERT INTO ${srcName} SELECT * FROM ${srcName}; @@ -83,5 +90,5 @@ suite("test_insert_strict_fail_url") { sql """ INSERT INTO ${dstName} SELECT `id`, `score` FROM ${srcName}; """ - }, "[DATA_QUALITY_ERROR]Encountered unqualified data, stop processing. url: ") + }, "error_log") } diff --git a/regression-test/suites/insert_p0/transaction/txn_insert.groovy b/regression-test/suites/insert_p0/transaction/txn_insert.groovy index 1f595d89173068..44ed52f098e1c9 100644 --- a/regression-test/suites/insert_p0/transaction/txn_insert.groovy +++ b/regression-test/suites/insert_p0/transaction/txn_insert.groovy @@ -734,6 +734,7 @@ suite("txn_insert") { UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "false", "replication_num" = "1" ); """ diff --git a/regression-test/suites/insert_p0/transaction/txn_insert_with_specify_columns.groovy b/regression-test/suites/insert_p0/transaction/txn_insert_with_specify_columns.groovy index 495b84aed788b8..e7c09d267399bd 100644 --- a/regression-test/suites/insert_p0/transaction/txn_insert_with_specify_columns.groovy +++ b/regression-test/suites/insert_p0/transaction/txn_insert_with_specify_columns.groovy @@ -65,58 +65,4 @@ suite("txn_insert_with_specify_columns", "p0") { sql """insert into ${table} (c3, c2, c1) values(35, 25, 15);""" sql """commit""" qt_select_unique """select c1,c2,c3 from ${table} order by c1,c2,c3""" - - try { - sql """set enable_nereids_planner=false""" - sql """ DROP TABLE IF EXISTS ${table}""" - sql """ - CREATE TABLE ${table}( - c1 INT NULL, - c2 INT NULL, - c3 INT NULL default 1 - ) ENGINE=OLAP - UNIQUE KEY(c1) - DISTRIBUTED BY HASH(c1) BUCKETS 3 - PROPERTIES ( - "replication_num" = "1" - ); - """ - sql """begin""" - sql """insert into ${table} (c1, c3, c2) values(10, 30, 20);""" - logger.info(failed) - assertFalse(true); - } catch (Exception e) { - logger.info(e.getMessage()) - assertTrue(e.getMessage().contains("The legacy planner does not support specifying column names")) - } finally { - sql "commit" - sql """ DROP TABLE IF EXISTS ${table}""" - } - - try { - sql """set enable_nereids_planner=false""" - sql """ DROP TABLE IF EXISTS ${table}""" - sql """ - CREATE TABLE ${table}( - c1 INT NULL, - c2 INT NULL, - c3 INT NULL default 1 - ) ENGINE=OLAP - DUPLICATE KEY(c1) - DISTRIBUTED BY HASH(c1) BUCKETS 3 - PROPERTIES ( - "replication_num" = "1" - ); - """ - sql """begin""" - sql """insert into ${table} (c1, c3, c2) values(10, 30, 20);""" - logger.info(failed) - assertFalse(true); - } catch (Exception e) { - logger.info(e.getMessage()) - assertTrue(e.getMessage().contains("The legacy planner does not support specifying column names")) - } finally { - sql "commit" - sql """ DROP TABLE IF EXISTS ${table}""" - } } diff --git a/regression-test/suites/insert_p2/test_group_commit_http_stream_lineitem_schema_change.groovy b/regression-test/suites/insert_p2/test_group_commit_http_stream_lineitem_schema_change.groovy index 2d0246774c43cb..2abfcd8612692b 100644 --- a/regression-test/suites/insert_p2/test_group_commit_http_stream_lineitem_schema_change.groovy +++ b/regression-test/suites/insert_p2/test_group_commit_http_stream_lineitem_schema_change.groovy @@ -113,6 +113,7 @@ DUPLICATE KEY(`l_shipdate`, `l_orderkey`) COMMENT 
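
Both fail-url suites were loosened to match only on "error_log" because the exact wording varies between runs: depending on the instance count and batch size the failure surfaces either as the strict-mode message or as the DATA_QUALITY_ERROR message, but both carry the `_load_error_log` URL. A hedged sketch of the relaxed assertion without the expectExceptionLike helper, using the suite's `sql` helper; `dst` and `src` are placeholder tables:

```groovy
try {
    sql """ INSERT INTO dst SELECT `id`, `score` FROM src; """
    assert false : "insert was expected to fail on unqualified data"
} catch (Exception e) {
    // Either "Insert has filtered data in strict mode. url: http://.../_load_error_log?..."
    // or "[DATA_QUALITY_ERROR]Encountered unqualified data, stop processing. url: ...";
    // both variants contain "error_log", so that substring is the stable thing to assert on.
    assert e.message.contains("error_log")
}
```
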
"OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ); """ @@ -144,6 +145,7 @@ DUPLICATE KEY(`l_shipdate`, `l_orderkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ); """ diff --git a/regression-test/suites/insert_p2/test_group_commit_insert_into_lineitem_scheme_change.groovy b/regression-test/suites/insert_p2/test_group_commit_insert_into_lineitem_scheme_change.groovy index 4043c7660ce956..7924260b72a437 100644 --- a/regression-test/suites/insert_p2/test_group_commit_insert_into_lineitem_scheme_change.groovy +++ b/regression-test/suites/insert_p2/test_group_commit_insert_into_lineitem_scheme_change.groovy @@ -129,6 +129,7 @@ DUPLICATE KEY(`l_shipdate`, `l_orderkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ); """ @@ -161,6 +162,7 @@ DUPLICATE KEY(`l_shipdate`, `l_orderkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ); """ diff --git a/regression-test/suites/insert_p2/test_group_commit_stream_load_lineitem_schema_change.groovy b/regression-test/suites/insert_p2/test_group_commit_stream_load_lineitem_schema_change.groovy index 86c422f0d7354d..b42f5d9038c14f 100644 --- a/regression-test/suites/insert_p2/test_group_commit_stream_load_lineitem_schema_change.groovy +++ b/regression-test/suites/insert_p2/test_group_commit_stream_load_lineitem_schema_change.groovy @@ -114,6 +114,7 @@ DUPLICATE KEY(`l_shipdate`, `l_orderkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ); """ @@ -145,6 +146,7 @@ DUPLICATE KEY(`l_shipdate`, `l_orderkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ); """ diff --git a/regression-test/suites/insert_p2/txn_insert_concurrent_insert_ud.groovy b/regression-test/suites/insert_p2/txn_insert_concurrent_insert_ud.groovy index fe1f5533701df8..a524703f9ef99f 100644 --- a/regression-test/suites/insert_p2/txn_insert_concurrent_insert_ud.groovy +++ b/regression-test/suites/insert_p2/txn_insert_concurrent_insert_ud.groovy @@ -57,6 +57,7 @@ suite("txn_insert_concurrent_insert_ud") { UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) """ diff --git a/regression-test/suites/insert_p2/txn_insert_concurrent_insert_update.groovy b/regression-test/suites/insert_p2/txn_insert_concurrent_insert_update.groovy index a5d0bcd114b568..b467a87de8201c 100644 --- a/regression-test/suites/insert_p2/txn_insert_concurrent_insert_update.groovy +++ b/regression-test/suites/insert_p2/txn_insert_concurrent_insert_update.groovy @@ -57,6 +57,7 @@ suite("txn_insert_concurrent_insert_update") { UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) """ diff --git a/regression-test/suites/insert_p2/txn_insert_with_schema_change.groovy b/regression-test/suites/insert_p2/txn_insert_with_schema_change.groovy index 56692b68d3730d..ac05e3a69f4bb8 100644 --- a/regression-test/suites/insert_p2/txn_insert_with_schema_change.groovy +++ 
b/regression-test/suites/insert_p2/txn_insert_with_schema_change.groovy @@ -54,6 +54,7 @@ suite("txn_insert_with_schema_change") { DUPLICATE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) """ diff --git a/regression-test/suites/inverted_index_p0/test_count_on_index.groovy b/regression-test/suites/inverted_index_p0/test_count_on_index.groovy index 4197231089af5d..320fc65ff76bc9 100644 --- a/regression-test/suites/inverted_index_p0/test_count_on_index.groovy +++ b/regression-test/suites/inverted_index_p0/test_count_on_index.groovy @@ -93,7 +93,7 @@ suite("test_count_on_index_httplogs", "p0") { """ } - def load_httplogs_data = {table_name, label, read_flag, format_flag, file_name, ignore_failure=false, + def stream_load_data = {table_name, label, read_flag, format_flag, file_name, ignore_failure=false, expected_succ_rows = -1, load_to_single_tablet = 'true' -> // load the json data @@ -137,8 +137,8 @@ suite("test_count_on_index_httplogs", "p0") { create_httplogs_dup_table.call(testTable_dup) create_httplogs_unique_table.call(testTable_unique) - load_httplogs_data.call(testTable_dup, 'test_httplogs_load_count_on_index', 'true', 'json', 'documents-1000.json') - load_httplogs_data.call(testTable_unique, 'test_httplogs_load_count_on_index', 'true', 'json', 'documents-1000.json') + stream_load_data.call(testTable_dup, 'test_httplogs_load_count_on_index', 'true', 'json', 'documents-1000.json') + stream_load_data.call(testTable_unique, 'test_httplogs_load_count_on_index', 'true', 'json', 'documents-1000.json') sql "sync" sql """ set enable_common_expr_pushdown = true """ @@ -278,6 +278,41 @@ suite("test_count_on_index_httplogs", "p0") { // case4: test compound query when inverted_index_query disable qt_sql "SELECT COUNT() from ${testTable_dup} where request = 'images' or (size = 0 and status > 400)" qt_sql "SELECT /*+SET_VAR(enable_inverted_index_query=false) */ COUNT() from ${testTable_dup} where request = 'images' or (size = 0 and status > 400)" + + // case5: test complex count to testify bad case + def tableName5 = 'test_count_on_index_bad_case' + sql "DROP TABLE IF EXISTS ${tableName5}" + sql """ + CREATE TABLE `${tableName5}` ( + `a` DATE NOT NULL COMMENT '', + `b` VARCHAR(4096) NULL COMMENT '', + `c` VARCHAR(4096) NULL COMMENT '', + `d` VARCHAR(4096) NULL COMMENT '', + `e` VARCHAR(4096) NULL COMMENT '', + INDEX idx_a(`a`) USING INVERTED COMMENT '', + INDEX idx_e(`e`) USING INVERTED COMMENT '' + ) ENGINE=OLAP + UNIQUE KEY(`a`, `b`) + COMMENT '' + DISTRIBUTED BY HASH(`a`) BUCKETS 3 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + stream_load_data.call(tableName5, 'test_count_on_index_bad_case', 'true', 'json', 'count-on-index.json') + def bad_sql = """ + SELECT + COUNT(CASE WHEN c IN ('c1', 'c2', 'c3') AND d = 'd1' THEN b END) AS num1, + COUNT(CASE WHEN e = 'e1' AND c IN ('c1', 'c2', 'c3') THEN b END) AS num2 + FROM ${tableName5} + WHERE a = '2024-07-26' + AND e = 'e1'; + """ + explain { + sql("${bad_sql}") + contains "pushAggOp=NONE" + } + qt_sql_bad "${bad_sql}" } finally { //try_sql("DROP TABLE IF EXISTS ${testTable}") } diff --git a/regression-test/suites/inverted_index_p0/test_index_no_need_read_data.groovy b/regression-test/suites/inverted_index_p0/test_index_no_need_read_data.groovy index 924fffe54d77b2..43ec140fd3a2ed 100644 --- a/regression-test/suites/inverted_index_p0/test_index_no_need_read_data.groovy +++ 
b/regression-test/suites/inverted_index_p0/test_index_no_need_read_data.groovy @@ -53,7 +53,6 @@ suite("test_index_no_need_read_data", "inverted_index_select"){ """ // case1: enable nereids planner - sql "set enable_nereids_planner = true" sql """ set enable_common_expr_pushdown = true; """ qt_select_nereids_0 "SELECT * FROM ${table1} ORDER BY id" @@ -70,23 +69,6 @@ suite("test_index_no_need_read_data", "inverted_index_select"){ qt_select_nereids_11 "SELECT addr, name, upper(city) FROM ( SELECT * from ${table1} WHERE city != 'beijing' ORDER BY id) t" qt_select_nereids_12 "SELECT sum(n) FROM ${table1} WHERE city = 'beijing' group by id ORDER BY id" - // case2: disable nereids planner - sql "set enable_nereids_planner = false" - - qt_select_0 "SELECT * FROM ${table1} ORDER BY id" - qt_select_1 "SELECT count() FROM ${table1} WHERE n > 100" - qt_select_2 "SELECT count() FROM ${table1} WHERE city = 'beijing'" - qt_select_3 "SELECT count(*) FROM ${table1} WHERE city = 'beijing'" - qt_select_4 "SELECT * FROM ${table1} WHERE city = 'beijing' ORDER BY id" - qt_select_5 "SELECT city, addr, name FROM ${table1} WHERE city = 'beijing' ORDER BY id" - qt_select_6 "SELECT addr, name FROM ${table1} WHERE city > 'beijing' ORDER BY city" - qt_select_7 "SELECT addr, name FROM ${table1} WHERE city > 'beijing' ORDER BY id" - qt_select_8 "SELECT upper(city), name FROM ${table1} WHERE city != 'beijing' ORDER BY id" - qt_select_9 "SELECT length(addr), name FROM ${table1} WHERE city != 'beijing' ORDER BY id" - qt_select_10 "SELECT addr, name FROM ( SELECT * from ${table1} WHERE city != 'beijing' ORDER BY id) t" - qt_select_11 "SELECT addr, name, upper(city) FROM ( SELECT * from ${table1} WHERE city != 'beijing' ORDER BY id) t" - qt_select_12 "SELECT sum(n) FROM ${table1} WHERE city = 'beijing' group by id ORDER BY id" - def table2 = "test_index_no_need_read_data2" sql "drop table if exists ${table2}" diff --git a/regression-test/suites/jsonb_p0/test_jsonb_load_and_function.groovy b/regression-test/suites/jsonb_p0/test_jsonb_load_and_function.groovy index 46f61f63fd750f..cb1b8f4262a1c8 100644 --- a/regression-test/suites/jsonb_p0/test_jsonb_load_and_function.groovy +++ b/regression-test/suites/jsonb_p0/test_jsonb_load_and_function.groovy @@ -532,15 +532,6 @@ suite("test_jsonb_load_and_function", "p0") { qt_select_json_contains """SELECT id, j, json_contains(j, cast('{"k2":300}' as json)) FROM ${testTable} ORDER BY id""" qt_select_json_contains """SELECT id, j, json_contains(j, cast('{"k1":"v41","k2":400}' as json), '\$.a1') FROM ${testTable} ORDER BY id""" qt_select_json_contains """SELECT id, j, json_contains(j, cast('[123,456]' as json)) FROM ${testTable} ORDER BY id""" - // old planner do not support explode_json_object - test { - sql """ select /*+SET_VAR(experimental_enable_nereids_planner=false)*/ id, j, k,v from ${testTable} lateral view explode_json_object_outer(j) tmp as k,v order by id; """ - exception "errCode = 2" - } - test { - sql """ select /*+SET_VAR(experimental_enable_nereids_planner=false)*/ id, j, k,v from ${testTable} lateral view explode_json_object_outer(j) tmp as k,v order by id; """ - exception "errCode = 2" - } // json_parse qt_sql_json_parse """SELECT/*+SET_VAR(enable_fold_constant_by_be=false)*/ json_parse('{"":"v1"}')""" diff --git a/regression-test/suites/load_p0/insert/test_insert_default_value.groovy b/regression-test/suites/load_p0/insert/test_insert_default_value.groovy index f2f2e76c5f06cd..3d071d3eaafbec 100644 --- 
a/regression-test/suites/load_p0/insert/test_insert_default_value.groovy +++ b/regression-test/suites/load_p0/insert/test_insert_default_value.groovy @@ -17,8 +17,6 @@ suite("test_insert_default_value") { - sql """ SET enable_fallback_to_original_planner=false """ - sql """ DROP TABLE IF EXISTS test_insert_dft_tbl""" sql """ @@ -39,10 +37,7 @@ suite("test_insert_default_value") { ); """ - sql """ set enable_nereids_planner=true """ sql """ insert into test_insert_dft_tbl values() """ - - sql """ set enable_nereids_planner=false """ sql """ insert into test_insert_dft_tbl values() """ qt_select1 """ select k1, k2, k3, k4, k5, k6, k7 from test_insert_dft_tbl """ @@ -70,10 +65,7 @@ suite("test_insert_default_value") { ); """ - sql """ set enable_nereids_planner=true """ sql """ insert into test_insert_dft_tbl values() """ - - sql """ set enable_nereids_planner=false """ sql """ insert into test_insert_dft_tbl values() """ qt_select2 """ select k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11 from test_insert_dft_tbl """ @@ -112,14 +104,8 @@ suite("test_insert_default_value") { ); """ - sql """ set enable_nereids_planner=true """ sql """ INSERT INTO `test_insert_default_null` (gz_organization_id, `company_id`, `material_id`, create_time) VALUES ('1', '2', 'test', DEFAULT); """ qt_select3 """ select * from test_insert_default_null;""" sql """ truncate table test_insert_default_null;""" - - sql """ set enable_nereids_planner=false """ - sql """ INSERT INTO `test_insert_default_null` (gz_organization_id, `company_id`, `material_id`, create_time) VALUES ('1', '2', 'test', DEFAULT); """ - - qt_select4 """ select * from test_insert_default_null;""" sql "drop table if exists test_insert_default_null" } diff --git a/regression-test/suites/load_p2/broker_load/test_broker_load.groovy b/regression-test/suites/load_p2/broker_load/test_broker_load.groovy index e5f8272f9862c4..c0574d17239b65 100644 --- a/regression-test/suites/load_p2/broker_load/test_broker_load.groovy +++ b/regression-test/suites/load_p2/broker_load/test_broker_load.groovy @@ -342,8 +342,9 @@ suite("test_broker_load_p2", "p2") { break; } if (result[0][2].equals("CANCELLED")) { + logger.info("Load result: " + result[0]) assertTrue(result[0][6].contains(task_info[i])) - assertTrue(result[0][7].contains(error_msg[i])) + assertTrue(result[0][7].contains(error_msg[i]), "expected: " + error_msg[i] + ", actual: " + result[0][7] + ", label: $label") break; } Thread.sleep(1000) diff --git a/regression-test/suites/manager/test_manager_interface_1.groovy b/regression-test/suites/manager/test_manager_interface_1.groovy index 5621b4fcee96d0..249ce252ffbf86 100644 --- a/regression-test/suites/manager/test_manager_interface_1.groovy +++ b/regression-test/suites/manager/test_manager_interface_1.groovy @@ -118,7 +118,7 @@ suite('test_manager_interface_1',"p0") { sql """ drop table test_metadata_name_ids """ - qt_metadata_2 """ select CATALOG_NAME,DATABASE_NAME,TABLE_NAME from ${tableName} + qt_metadata_3 """ select CATALOG_NAME,DATABASE_NAME,TABLE_NAME from ${tableName} where CATALOG_NAME="internal" and DATABASE_NAME ="test_manager_metadata_name_ids" and TABLE_NAME="test_metadata_name_ids";""" } test_metadata_name_ids() @@ -273,18 +273,19 @@ suite('test_manager_interface_1',"p0") { assertTrue(result[0][0].toLowerCase() == "test_manager_tb_1") - result = sql """ show create table test_manager_tb_1""" + result = sql """ show create table test_manager_tb_1""" + logger.info ("result = ${result}") assertTrue(result[0][0] == "test_manager_tb_1") // TABLE NAME // 
assertTrue(result[0][1].substring() == "test_manager_tb_1") //DDL def ddl_str = result[0][1] def idx = ddl_str.indexOf("PROPERTIES") assertTrue(idx != -1 ); assertTrue( ddl_str.startsWith("""CREATE TABLE `test_manager_tb_1` ( - `k1` TINYINT NULL, - `k2` DECIMAL(10, 2) NULL DEFAULT "10.05", - `k3` CHAR(10) NULL COMMENT 'string column', - `k4` INT NOT NULL DEFAULT "1" COMMENT 'int column', - `k5` TEXT NULL + `k1` tinyint NULL, + `k2` decimal(10,2) NULL DEFAULT "10.05", + `k3` char(10) NULL COMMENT 'string column', + `k4` int NOT NULL DEFAULT "1" COMMENT 'int column', + `k5` text NULL ) ENGINE=OLAP DUPLICATE KEY(`k1`, `k2`, `k3`) COMMENT 'manager_test_table' @@ -680,28 +681,28 @@ DISTRIBUTED BY HASH(`k1`) BUCKETS 1""")) assertTrue(result[0][0] == "audit_log") assertTrue(result[0][1].contains("CREATE TABLE `audit_log`")) - assertTrue(result[0][1].contains("`query_id` VARCHAR(48) NULL,")) - assertTrue(result[0][1].contains("`time` DATETIME(3) NULL,")) - assertTrue(result[0][1].contains("`client_ip` VARCHAR(128) NULL,")) - assertTrue(result[0][1].contains("`user` VARCHAR(128) NULL,")) - assertTrue(result[0][1].contains("`catalog` VARCHAR(128) NULL")) - assertTrue(result[0][1].contains("`db` VARCHAR(128) NULL,")) - assertTrue(result[0][1].contains("`state` VARCHAR(128) NULL")) - assertTrue(result[0][1].contains("`error_code` INT NULL,")) - assertTrue(result[0][1].contains("`error_message` TEXT NULL,")) - assertTrue(result[0][1].contains("`query_time` BIGINT NULL,")) - assertTrue(result[0][1].contains("`scan_bytes` BIGINT NULL,")) - assertTrue(result[0][1].contains("`scan_rows` BIGINT NULL,")) - assertTrue(result[0][1].contains("`return_rows` BIGINT NULL,")) - assertTrue(result[0][1].contains("`stmt_id` BIGINT NULL,")) - assertTrue(result[0][1].contains("`is_query` TINYINT NULL,")) - assertTrue(result[0][1].contains("`frontend_ip` VARCHAR(128) NULL,")) - assertTrue(result[0][1].contains("`cpu_time_ms` BIGINT NULL,")) - assertTrue(result[0][1].contains("`sql_hash` VARCHAR(128) NULL,")) - assertTrue(result[0][1].contains("`sql_digest` VARCHAR(128) NULL,")) - assertTrue(result[0][1].contains("`peak_memory_bytes` BIGINT NULL,")) - assertTrue(result[0][1].contains("`workload_group` TEXT NULL,")) - assertTrue(result[0][1].contains("`stmt` TEXT NULL")) + assertTrue(result[0][1].contains("`query_id` varchar(48) NULL,")) + assertTrue(result[0][1].contains("`time` datetime(3) NULL,")) + assertTrue(result[0][1].contains("`client_ip` varchar(128) NULL,")) + assertTrue(result[0][1].contains("`user` varchar(128) NULL,")) + assertTrue(result[0][1].contains("`catalog` varchar(128) NULL")) + assertTrue(result[0][1].contains("`db` varchar(128) NULL,")) + assertTrue(result[0][1].contains("`state` varchar(128) NULL")) + assertTrue(result[0][1].contains("`error_code` int NULL,")) + assertTrue(result[0][1].contains("`error_message` text NULL,")) + assertTrue(result[0][1].contains("`query_time` bigint NULL,")) + assertTrue(result[0][1].contains("`scan_bytes` bigint NULL,")) + assertTrue(result[0][1].contains("`scan_rows` bigint NULL,")) + assertTrue(result[0][1].contains("`return_rows` bigint NULL,")) + assertTrue(result[0][1].contains("`stmt_id` bigint NULL,")) + assertTrue(result[0][1].contains("`is_query` tinyint NULL,")) + assertTrue(result[0][1].contains("`frontend_ip` varchar(128) NULL,")) + assertTrue(result[0][1].contains("`cpu_time_ms` bigint NULL,")) + assertTrue(result[0][1].contains("`sql_hash` varchar(128) NULL,")) + assertTrue(result[0][1].contains("`sql_digest` varchar(128) NULL,")) + 
assertTrue(result[0][1].contains("`peak_memory_bytes` bigint NULL,")) + assertTrue(result[0][1].contains("`workload_group` text NULL,")) + assertTrue(result[0][1].contains("`stmt` text NULL")) assertTrue(result[0][1].contains("ENGINE=OLAP")) diff --git a/regression-test/suites/mtmv_p0/same_column_name_check/same_column_name_check.groovy b/regression-test/suites/mtmv_p0/same_column_name_check/same_column_name_check.groovy new file mode 100644 index 00000000000000..0eb99ab04177df --- /dev/null +++ b/regression-test/suites/mtmv_p0/same_column_name_check/same_column_name_check.groovy @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("same_column_name_check") { + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "set runtime_filter_mode=OFF"; + sql "SET ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" + + sql """ + drop table if exists orders + """ + + sql """ + CREATE TABLE IF NOT EXISTS orders ( + o_orderkey INTEGER NOT NULL, + o_custkey INTEGER NOT NULL, + o_orderstatus CHAR(1) NOT NULL, + o_totalprice DECIMALV3(15,2) NOT NULL, + o_orderdate DATE NOT NULL, + o_orderpriority CHAR(15) NOT NULL, + o_clerk CHAR(15) NOT NULL, + o_shippriority INTEGER NOT NULL, + O_COMMENT VARCHAR(79) NOT NULL + ) + DUPLICATE KEY(o_orderkey, o_custkey) + PARTITION BY RANGE(o_orderdate) ( + PARTITION `day_2` VALUES LESS THAN ('2023-12-9'), + PARTITION `day_3` VALUES LESS THAN ("2023-12-11"), + PARTITION `day_4` VALUES LESS THAN ("2023-12-30") + ) + DISTRIBUTED BY HASH(o_orderkey) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + insert into orders values + (1, 1, 'o', 9.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (1, 1, 'o', 10.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (1, 1, 'o', 10.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (1, 1, 'o', 10.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (2, 1, 'o', 11.5, '2023-12-09', 'a', 'b', 1, 'yy'), + (2, 1, 'o', 11.5, '2023-12-09', 'a', 'b', 1, 'yy'), + (2, 1, 'o', 11.5, '2023-12-09', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 12.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 12.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 12.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 33.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (4, 2, 'o', 43.2, '2023-12-11', 'c','d',2, 'mm'), + (4, 2, 'o', 43.2, '2023-12-11', 'c','d',2, 'mm'), + (4, 2, 'o', 43.2, '2023-12-11', 'c','d',2, 'mm'), + (5, 2, 'o', 56.2, '2023-12-12', 'c','d',2, 'mi'), + (5, 2, 'o', 56.2, '2023-12-12', 'c','d',2, 'mi'), + (5, 2, 'o', 56.2, '2023-12-12', 'c','d',2, 'mi'), + (5, 2, 'o', 1.2, '2023-12-12', 'c','d',2, 'mi'); + """ + + sql """analyze table orders with sync""" + + sql """DROP MATERIALIZED VIEW IF EXISTS mv_1""" + test { + sql """ + CREATE MATERIALIZED VIEW mv_1 + BUILD IMMEDIATE REFRESH AUTO ON 
MANUAL + partition by(o_orderdate) + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') + AS + select o_orderdatE, o_shippriority, o_comment, o_orderdate, + sum(o_totalprice) as sum_total, + max(o_totalpricE) as max_total, + min(o_totalprice) as min_total, + count(*) as count_all, + bitmap_union(to_bitmap(case when o_shippriority > 1 and o_orderkey IN (1, 3) then o_custkey else null end)) cnt_1, + bitmap_union(to_bitmap(case when o_shippriority > 2 and o_orderkey IN (2) then o_custkey else null end)) as cnt_2 + from (select * from orders) as t1 + group by + o_orderdatE, + o_shippriority, + o_comment, + o_orderdate; + """ + exception "Duplicate column name" + } +} \ No newline at end of file diff --git a/regression-test/suites/mtmv_p0/test_alter_job_mtmv.groovy b/regression-test/suites/mtmv_p0/test_alter_job_mtmv.groovy new file mode 100644 index 00000000000000..fa1618d5bf58f5 --- /dev/null +++ b/regression-test/suites/mtmv_p0/test_alter_job_mtmv.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_alter_job_mtmv") { + String suiteName = "test_alter_job_mtmv" + String tableName = "${suiteName}_table" + String mvName = "${suiteName}_mv" + sql """drop table if exists `${tableName}`""" + sql """drop materialized view if exists ${mvName};""" + + sql """ + CREATE TABLE `${tableName}` ( + `user_id` LARGEINT NOT NULL COMMENT '\"用户id\"', + `date` DATE NOT NULL COMMENT '\"数据灌入日期时间\"', + `num` SMALLINT NOT NULL COMMENT '\"数量\"' + ) ENGINE=OLAP + DUPLICATE KEY(`user_id`, `date`, `num`) + COMMENT 'OLAP' + PARTITION BY RANGE(`date`) + (PARTITION p201701_1000 VALUES [('0000-01-01'), ('2017-02-01')), + PARTITION p201702_2000 VALUES [('2017-02-01'), ('2017-03-01')), + PARTITION p201703_all VALUES [('2017-03-01'), ('2017-04-01'))) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 2 + PROPERTIES ('replication_num' = '1') ; + """ + sql """ + insert into ${tableName} values(1,"2017-01-15",1),(1,"2017-02-15",2),(1,"2017-03-15",3); + """ + + //This is an immediately built materialized view that cancels running tasks and creates new ones after updating job information. + // Due to the uncertainty of the case, there may be several situations here: + // 1. The task has not been created yet, so it has not been cancelled + // 2. The task has been completed, so there was no cancellation + // 3. 
The task has been created but not yet completed + // But regardless of the status of the previous case, + // this case is used to ensure that the newly launched task can run successfully after modifying the materialized view + sql """ + CREATE MATERIALIZED VIEW ${mvName} + REFRESH COMPLETE ON MANUAL + partition by(`date`) + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') + AS + SELECT * FROM ${tableName}; + """ + sql """alter MATERIALIZED VIEW ${mvName} refresh COMPLETE on commit; """ + waitingMTMVTaskFinishedByMvNameAllowCancel(mvName) + + sql """drop table if exists `${tableName}`""" + sql """drop materialized view if exists ${mvName};""" +} diff --git a/regression-test/suites/mtmv_p0/test_enable_date_non_deterministic_function_mtmv.groovy b/regression-test/suites/mtmv_p0/test_enable_date_non_deterministic_function_mtmv.groovy index c085779e707e3a..a8705c6ba9ed88 100644 --- a/regression-test/suites/mtmv_p0/test_enable_date_non_deterministic_function_mtmv.groovy +++ b/regression-test/suites/mtmv_p0/test_enable_date_non_deterministic_function_mtmv.groovy @@ -57,7 +57,7 @@ suite("test_enable_date_non_deterministic_function_mtmv","mtmv") { Assert.fail(); } catch (Exception e) { logger.info(e.getMessage()) - assertTrue(e.getMessage().contains("can not contain invalid expression")); + assertTrue(e.getMessage().contains("can not contain nonDeterministic expression")); } sql """drop materialized view if exists ${mvName};""" @@ -75,7 +75,7 @@ suite("test_enable_date_non_deterministic_function_mtmv","mtmv") { Assert.fail(); } catch (Exception e) { logger.info(e.getMessage()) - assertTrue(e.getMessage().contains("can not contain invalid expression")); + assertTrue(e.getMessage().contains("can not contain nonDeterministic expression")); } sql """drop materialized view if exists ${mvName};""" @@ -128,7 +128,7 @@ suite("test_enable_date_non_deterministic_function_mtmv","mtmv") { Assert.fail(); } catch (Exception e) { logger.info(e.getMessage()) - assertTrue(e.getMessage().contains("can not contain invalid expression")); + assertTrue(e.getMessage().contains("can not contain nonDeterministic expression")); } sql """drop table if exists `${tableName}`""" diff --git a/regression-test/suites/nereids_function_p0/agg_function/test_covar_samp.groovy b/regression-test/suites/nereids_function_p0/agg_function/test_covar_samp.groovy index a75a933e748364..3c17713c7ef97b 100644 --- a/regression-test/suites/nereids_function_p0/agg_function/test_covar_samp.groovy +++ b/regression-test/suites/nereids_function_p0/agg_function/test_covar_samp.groovy @@ -85,6 +85,6 @@ suite("test_covar_samp") { (4, 4, 4) """ qt_sql "select covar_samp(x,y) from test_covar_samp" - + qt_notnull3 "select covar_samp(non_nullable(x), y) from test_covar_samp" sql """ DROP TABLE IF EXISTS test_covar_samp """ } diff --git a/regression-test/suites/nereids_function_p0/scalar_function/R.groovy b/regression-test/suites/nereids_function_p0/scalar_function/R.groovy index bf21154192b655..2f79eeea94274c 100644 --- a/regression-test/suites/nereids_function_p0/scalar_function/R.groovy +++ b/regression-test/suites/nereids_function_p0/scalar_function/R.groovy @@ -59,6 +59,8 @@ suite("nereids_scalar_fn_R") { qt_sql_relace_empty06 "select replace_empty('xyz', 'x', '');" qt_sql_relace_empty07 "select replace_empty('xyz', '', '');" qt_sql_relace_empty08 "select replace_empty('', '', 'abc');" + qt_sql_relace_empty09 "select replace_empty('你a好b世c界','','b');" + qt_sql_relace_empty10 "select replace_empty('你a好b世c界','','');" 
qt_sql_right_Varchar_Integer "select right(kvchrs1, kint) from fn_test order by kvchrs1, kint" qt_sql_right_Varchar_Integer_notnull "select right(kvchrs1, kint) from fn_test_not_nullable order by kvchrs1, kint" qt_sql_right_String_Integer "select right(kstr, kint) from fn_test order by kstr, kint" diff --git a/regression-test/suites/nereids_p0/cte/test_cte_filter_pushdown.groovy b/regression-test/suites/nereids_p0/cte/test_cte_filter_pushdown.groovy index d4191f879a8fc7..795d1ae7e5cf40 100644 --- a/regression-test/suites/nereids_p0/cte/test_cte_filter_pushdown.groovy +++ b/regression-test/suites/nereids_p0/cte/test_cte_filter_pushdown.groovy @@ -19,6 +19,7 @@ suite("test_cte_filter_pushdown") { sql "SET enable_pipeline_engine=true" sql "SET enable_fallback_to_original_planner=false" sql "set ignore_shape_nodes='PhysicalDistribute, PhysicalProject'" + sql "set runtime_filter_mode=OFF" // CTE filter pushing down with the same filter qt_cte_filter_pushdown_1 """ explain shape plan diff --git a/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_be.groovy b/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_be.groovy index 809b8e8b291d23..668e88c6f04944 100644 --- a/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_be.groovy +++ b/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_be.groovy @@ -53,4 +53,9 @@ suite("fold_constant_by_be") { sql 'set query_timeout=12;' qt_sql "select sleep(sign(1)*5);" + + explain { + sql("verbose select substring('123456', 1, 3)") + contains "varchar(3)" + } } diff --git a/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_string_arithmatic.groovy b/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_string_arithmatic.groovy index 2bcdfc2fd24068..d7956e4b60c508 100644 --- a/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_string_arithmatic.groovy +++ b/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_string_arithmatic.groovy @@ -79,7 +79,8 @@ suite("fold_constant_string_arithmatic") { testFoldConst("SELECT StrRight('Hello World', 5)") testFoldConst("SELECT Overlay('abcdef', '123', 3, 2)") testFoldConst("SELECT Parse_Url('http://www.example.com/path?query=abc', 'HOST')") - testFoldConst("SELECT Url_Decode('%20Hello%20World%20')") + testFoldConst("SELECT Url_Decode('+Hello+World+')") + testFoldConst("SELECT Url_Encode(' Hello World ')") // Substring with negative start index // Expected behavior: Depending on the SQL engine, might return an empty string or error. @@ -187,7 +188,7 @@ suite("fold_constant_string_arithmatic") { // UrlDecode with an invalid percent-encoded string // Expected behavior: Return NULL or error due to invalid encoding. 
- testFoldConst("SELECT Url_Decode('%ZZHello%20World')") + // testFoldConst("SELECT Url_Decode('%ZZHello%20World')") testFoldConst("select elt(0, \"hello\", \"doris\")") testFoldConst("select elt(1, \"hello\", \"doris\")") @@ -197,6 +198,7 @@ suite("fold_constant_string_arithmatic") { testFoldConst("select append_trailing_char_if_absent('a','c')") testFoldConst("select append_trailing_char_if_absent('ac','c')") + testFoldConst("select append_trailing_char_if_absent('it','a')") testFoldConst("select ascii('1')") testFoldConst("select ascii('a')") @@ -437,7 +439,8 @@ suite("fold_constant_string_arithmatic") { testFoldConst("SELECT StrRight(cast('Hello World' as string), 5)") testFoldConst("SELECT Overlay(cast('abcdef' as string), cast('123' as string), 3, 2)") testFoldConst("SELECT Parse_Url(cast('http://www.example.com/path?query=abc' as string), cast('HOST' as string))") - testFoldConst("SELECT Url_Decode(cast('%20Hello%20World%20' as string))") + testFoldConst("SELECT Url_Decode(cast('+Hello+World+' as string))") + testFoldConst("SELECT Url_Encode(cast(' Hello World ' as string))") // Substring with negative start index // Expected behavior: Depending on the SQL engine, might return an empty string or error. @@ -525,7 +528,7 @@ suite("fold_constant_string_arithmatic") { testFoldConst("SELECT Unhex(cast('GHIJ' as string))") // UrlDecode with an invalid percent-encoded string - testFoldConst("SELECT Url_Decode(cast('%ZZHello%20World' as string))") + // testFoldConst("SELECT Url_Decode(cast('%ZZHello%20World' as string))") // Additional function tests testFoldConst("SELECT Elt(0, cast('hello' as string), cast('doris' as string))") @@ -684,4 +687,91 @@ suite("fold_constant_string_arithmatic") { // fix problem of cast date and time function exception testFoldConst("select ifnull(date_format(CONCAT_WS('', '9999-07', '-00'), '%Y-%m'),3)") + // Normal Usage Test Cases + + // Test Case 1: Append missing trailing character + testFoldConst("select append_trailing_char_if_absent('hello', '!')") + // Expected Output: 'hello!' + + // Test Case 2: Trailing character already present + testFoldConst("select append_trailing_char_if_absent('hello!', '!')") + // Expected Output: 'hello!' + + // Test Case 3: Append trailing space + testFoldConst("select append_trailing_char_if_absent('hello', ' ')") + // Expected Output: 'hello ' + + // Test Case 4: Empty string input + testFoldConst("select append_trailing_char_if_absent('', '!')") + // Expected Output: '!' + + // Test Case 5: Append different character + testFoldConst("select append_trailing_char_if_absent('hello', '?')") + // Expected Output: 'hello?' + + // Test Case 6: String ends with a different character + testFoldConst("select append_trailing_char_if_absent('hello?', '!')") + // Expected Output: 'hello?!' 
+ + // Edge and Unusual Usage Test Cases + + // Test Case 7: Input is NULL + testFoldConst("select append_trailing_char_if_absent(NULL, '!')") + // Expected Output: NULL + + // Test Case 8: Trailing character is NULL + testFoldConst("select append_trailing_char_if_absent('hello', NULL)") + // Expected Output: NULL + + // Test Case 9: Empty trailing character + testFoldConst("select append_trailing_char_if_absent('hello', '')") + // Expected Output: Error or no change depending on implementation + + // Test Case 10: Trailing character is more than 1 character long + testFoldConst("select append_trailing_char_if_absent('hello', 'ab')") + // Expected Output: Error + + // Test Case 11: Input string is a number + testFoldConst("select append_trailing_char_if_absent(12345, '!')") + // Expected Output: Error or '12345!' + + // Test Case 12: Trailing character is a number + testFoldConst("select append_trailing_char_if_absent('hello', '1')") + // Expected Output: 'hello1' + + // Test Case 13: Input is a single character + testFoldConst("select append_trailing_char_if_absent('h', '!')") + // Expected Output: 'h!' + + // Test Case 14: Unicode character as input and trailing character + testFoldConst("select append_trailing_char_if_absent('こんにちは', '!')") + // Expected Output: 'こんにちは!' + + // Test Case 15: Multibyte character as trailing character + testFoldConst("select append_trailing_char_if_absent('hello', '😊')") + // Expected Output: 'hello😊' + + // Test Case 16: Long string input + testFoldConst("select append_trailing_char_if_absent('This is a very long string', '.')") + // Expected Output: 'This is a very long string.' + + // Error Handling Test Cases + + // Test Case 17: Invalid trailing character data type (numeric) + testFoldConst("select append_trailing_char_if_absent('hello', 1)") + // Expected Output: Error + + // Test Case 18: Invalid input data type (integer) + testFoldConst("select append_trailing_char_if_absent(12345, '!')") + // Expected Output: Error or '12345!' + + // Test Case 19: Non-ASCII characters + testFoldConst("select append_trailing_char_if_absent('Привет', '!')") + // Expected Output: 'Привет!' 
+ + // Test Case 20: Trailing character with whitespace + testFoldConst("select append_trailing_char_if_absent('hello', ' ')") + // Expected Output: 'hello ' + + } diff --git a/regression-test/suites/nereids_p0/hint/multi_leading.groovy b/regression-test/suites/nereids_p0/hint/multi_leading.groovy index 048c2e25498950..4425fae1db2d11 100644 --- a/regression-test/suites/nereids_p0/hint/multi_leading.groovy +++ b/regression-test/suites/nereids_p0/hint/multi_leading.groovy @@ -45,99 +45,99 @@ suite("multi_leading") { sql """create table t3 (c3 int, c33 int) distributed by hash(c3) buckets 3 properties('replication_num' = '1');""" sql """create table t4 (c4 int, c44 int) distributed by hash(c4) buckets 3 properties('replication_num' = '1');""" - streamLoad { - table "t1" - db "test_multi_leading" - set 'column_separator', '|' - set 'format', 'csv' - file 't1.csv' - time 10000 - } - - streamLoad { - table "t2" - db "test_multi_leading" - set 'column_separator', '|' - set 'format', 'csv' - file 't2.csv' - time 10000 - } - - streamLoad { - table "t3" - db "test_multi_leading" - set 'column_separator', '|' - set 'format', 'csv' - file 't3.csv' - time 10000 - } - - streamLoad { - table "t4" - db "test_multi_leading" - set 'column_separator', '|' - set 'format', 'csv' - file 't4.csv' - time 10000 - } + // streamLoad { + // table "t1" + // db "test_multi_leading" + // set 'column_separator', '|' + // set 'format', 'csv' + // file 't1.csv' + // time 10000 + // } + + // streamLoad { + // table "t2" + // db "test_multi_leading" + // set 'column_separator', '|' + // set 'format', 'csv' + // file 't2.csv' + // time 10000 + // } + + // streamLoad { + // table "t3" + // db "test_multi_leading" + // set 'column_separator', '|' + // set 'format', 'csv' + // file 't3.csv' + // time 10000 + // } + + // streamLoad { + // table "t4" + // db "test_multi_leading" + // set 'column_separator', '|' + // set 'format', 'csv' + // file 't4.csv' + // time 10000 + // } // test cte inline - qt_sql1_2 """explain shape plan with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - qt_sql1_3 """explain shape plan with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - qt_sql1_4 """explain shape plan with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - - qt_sql1_res_1 """with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - qt_sql1_res_2 """with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - qt_sql1_res_3 """with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - qt_sql1_res_4 """with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" - - // test subquery alone - qt_sql2_2 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql2_3 """explain shape plan select count(*) 
from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql2_4 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - - qt_sql2_res_1 """select count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql2_res_2 """select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql2_res_3 """select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql2_res_4 """select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - - // test subquery + cte - qt_sql3_2 """explain shape plan with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - qt_sql3_3 """explain shape plan with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - qt_sql3_4 """explain shape plan with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - - qt_sql3_res_1 """with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - qt_sql3_res_2 """with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - qt_sql3_res_3 """with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - qt_sql3_res_4 """with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" - - // test multi level subqueries - qt_sql4_1 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_2 """explain shape plan select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_3 """explain shape plan select count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_4 """explain shape plan select /*+ 
leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - explain { - sql """shape plan select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - contains("SyntaxError: leading(t4 t2) Msg:one query block can only have one leading clause") - } - explain { - sql """shape plan select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - contains("SyntaxError: leading(t4 t2) Msg:one query block can only have one leading clause") - } - explain { - sql """shape plan select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - contains("UnUsed: leading(alias2 t1)") - } - - qt_sql4_res_0 """select count(*) from (select c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_1 """select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_2 """select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_3 """select count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_4 """select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_5 """select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_6 """select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - qt_sql4_res_7 """select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" - - // use cte in scalar query - qt_sql5_2 """explain shape plan with cte as (select c11, c1 from t1) SELECT c1 FROM cte group by c1 having sum(cte.c11) > (select /*+ leading(cte t1) */ 0.05 * avg(t1.c11) from t1 join cte on t1.c1 = cte.c11 )""" + // qt_sql1_2 """explain shape plan with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + // qt_sql1_3 """explain shape plan with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = 
c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + // qt_sql1_4 """explain shape plan with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + + // qt_sql1_res_1 """with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + // qt_sql1_res_2 """with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + // qt_sql1_res_3 """with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + // qt_sql1_res_4 """with cte as (select /*+ leading(t1 t2) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t1 cte)*/ count(*) from cte,t1 where cte.c1 = t1.c1 and t1.c1 > 300;""" + + // // test subquery alone + // qt_sql2_2 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql2_3 """explain shape plan select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql2_4 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + + // qt_sql2_res_1 """select count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql2_res_2 """select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql2_res_3 """select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql2_res_4 """select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + + // // test subquery + cte + // qt_sql3_2 """explain shape plan with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" + // qt_sql3_3 """explain shape plan with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" + // qt_sql3_4 """explain shape plan with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" + + // qt_sql3_res_1 """with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" + // qt_sql3_res_2 """with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on 
alias1.c1 = cte.c11;;""" + // qt_sql3_res_3 """with cte as (select c11, c1 from t1 join t2 on c1 = c2) select count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" + // qt_sql3_res_4 """with cte as (select /*+ leading(t2 t1) */ c11, c1 from t1 join t2 on c1 = c2) select /*+ leading(t3 alias1 cte) */ count(*) from (select /*+ leading(t2 t1) */ c1, c11 from t1 join t2 on c1 = c2) as alias1 join t3 on alias1.c1 = t3.c3 join cte on alias1.c1 = cte.c11;;""" + + // // test multi level subqueries + // qt_sql4_1 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_2 """explain shape plan select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_3 """explain shape plan select count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_4 """explain shape plan select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // explain { + // sql """shape plan select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // contains("SyntaxError: leading(t4 t2) Msg:one query block can only have one leading clause") + // } + // explain { + // sql """shape plan select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // contains("SyntaxError: leading(t4 t2) Msg:one query block can only have one leading clause") + // } + // explain { + // sql """shape plan select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // contains("UnUsed: leading(alias2 t1)") + // } + + // qt_sql4_res_0 """select count(*) from (select c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_1 """select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_2 """select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_3 """select count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_4 """select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select c2, c22 from t2 
join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_5 """select /*+ leading(t3 alias1) */ count(*) from (select c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_6 """select count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + // qt_sql4_res_7 """select /*+ leading(t3 alias1) */ count(*) from (select /*+ leading(alias2 t1) */ c1, c11 from t1 join (select /*+ leading(t4 t2) */ c2, c22 from t2 join t4 on c2 = c4) as alias2 on c1 = alias2.c2) as alias1 join t3 on alias1.c1 = t3.c3;""" + + // // use cte in scalar query + // qt_sql5_2 """explain shape plan with cte as (select c11, c1 from t1) SELECT c1 FROM cte group by c1 having sum(cte.c11) > (select /*+ leading(cte t1) */ 0.05 * avg(t1.c11) from t1 join cte on t1.c1 = cte.c11 )""" } diff --git a/regression-test/suites/nereids_p0/hint/test_hint.groovy b/regression-test/suites/nereids_p0/hint/test_hint.groovy index d279b7c1a1d905..81033e014f1eda 100644 --- a/regression-test/suites/nereids_p0/hint/test_hint.groovy +++ b/regression-test/suites/nereids_p0/hint/test_hint.groovy @@ -42,20 +42,20 @@ suite("test_hint") { sql """create table t2 (c2 int, c22 int) distributed by hash(c2) buckets 3 properties('replication_num' = '1');""" // test hint positions, remove join in order to make sure shape stable when no use hint - qt_select1_1 """explain shape plan select /*+ leading(t2 broadcast t1) */ count(*) from t1 join t2 on c1 = c2;""" + // qt_select1_1 """explain shape plan select /*+ leading(t2 broadcast t1) */ count(*) from t1 join t2 on c1 = c2;""" - qt_select1_2 """explain shape plan /*+ leading(t2 broadcast t1) */ select count(*) from t1;""" + // qt_select1_2 """explain shape plan /*+ leading(t2 broadcast t1) */ select count(*) from t1;""" - qt_select1_3 """explain shape plan select /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ count(*) from t1;""" + // qt_select1_3 """explain shape plan select /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ count(*) from t1;""" - qt_select1_4 """explain shape plan/*+DBP: ROUTE={GROUP_ID(zjaq)}*/ select count(*) from t1;""" + // qt_select1_4 """explain shape plan/*+DBP: ROUTE={GROUP_ID(zjaq)}*/ select count(*) from t1;""" - qt_select1_5 """explain shape plan /*+ leading(t2 broadcast t1) */ select /*+ leading(t2 broadcast t1) */ count(*) from t1 join t2 on c1 = c2;""" + // qt_select1_5 """explain shape plan /*+ leading(t2 broadcast t1) */ select /*+ leading(t2 broadcast t1) */ count(*) from t1 join t2 on c1 = c2;""" - qt_select1_6 """explain shape plan/*+DBP: ROUTE={GROUP_ID(zjaq)}*/ select /*+ leading(t2 broadcast t1) */ count(*) from t1 join t2 on c1 = c2;""" + // qt_select1_6 """explain shape plan/*+DBP: ROUTE={GROUP_ID(zjaq)}*/ select /*+ leading(t2 broadcast t1) */ count(*) from t1 join t2 on c1 = c2;""" - qt_select1_7 """explain shape plan /*+ leading(t2 broadcast t1) */ select /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ count(*) from t1;""" + // qt_select1_7 """explain shape plan /*+ leading(t2 broadcast t1) */ select /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ count(*) from t1;""" - qt_select1_8 """explain shape plan /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ select /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ count(*) from t1;""" + // qt_select1_8 """explain shape plan /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ select /*+DBP: ROUTE={GROUP_ID(zjaq)}*/ 
count(*) from t1;""" } diff --git a/regression-test/suites/nereids_p0/hint/test_use_mv.groovy b/regression-test/suites/nereids_p0/hint/test_use_mv.groovy index e511ccc11ae071..e8bf38a9066718 100644 --- a/regression-test/suites/nereids_p0/hint/test_use_mv.groovy +++ b/regression-test/suites/nereids_p0/hint/test_use_mv.groovy @@ -53,16 +53,10 @@ suite("test_use_mv") { ); """ sql """ alter table t1 add rollup r1(k2, k1); """ - waitForRollUpJob("t1", 5000, 1) + waitForRollUpJob("t1", "r1", 15000) sql """ alter table t1 add rollup r2(k2); """ - waitForRollUpJob("t1", 5000, 1) + waitForRollUpJob("t1", "r2", 15000) createMV("create materialized view k1_k2_sumk3 as select k1, k2, sum(v1) from t1 group by k1, k2;") - sql """set enable_sync_mv_cost_based_rewrite = false""" - explain { - sql """select k1 from t1;""" - contains("t1(r1)") - } - sql """set enable_sync_mv_cost_based_rewrite = true""" explain { sql """select /*+ no_use_mv */ k1 from t1;""" notContains("t1(r1)") diff --git a/regression-test/suites/nereids_p0/insert_into_table/random.groovy b/regression-test/suites/nereids_p0/insert_into_table/random.groovy index 6cc5cb2b991514..f820ca89bd2de0 100644 --- a/regression-test/suites/nereids_p0/insert_into_table/random.groovy +++ b/regression-test/suites/nereids_p0/insert_into_table/random.groovy @@ -43,4 +43,15 @@ suite('nereids_insert_random') { sql 'set delete_without_partition=true' sql '''delete from dup_t_type_cast_rd where id is not null''' sql '''delete from dup_t_type_cast_rd where id is null''' + + sql 'set enable_strict_consistency_dml=true' + sql 'drop table if exists tbl_1' + sql 'drop table if exists tbl_4' + sql """CREATE TABLE tbl_1 (k1 INT, k2 INT) DISTRIBUTED BY HASH(k1) BUCKETS 10 PROPERTIES ( "light_schema_change" = "false", "replication_num" = "1");""" + sql """INSERT INTO tbl_1 VALUES (1, 11);""" + sql 'sync' + sql """CREATE TABLE tbl_4 (k1 INT, k2 INT, v INT SUM) AGGREGATE KEY (k1, k2) DISTRIBUTED BY HASH(k1) BUCKETS 10 PROPERTIES ( "replication_num" = "1"); """ + sql """INSERT INTO tbl_4 SELECT k1, k2, k2 FROM tbl_1;""" + sql 'sync' + qt_sql_select """ select * from tbl_4; """; } diff --git a/regression-test/suites/nereids_p0/sql_functions/aggregate_functions/test_aggregate_window_functions.groovy b/regression-test/suites/nereids_p0/sql_functions/aggregate_functions/test_aggregate_window_functions.groovy new file mode 100644 index 00000000000000..a2016ba739609f --- /dev/null +++ b/regression-test/suites/nereids_p0/sql_functions/aggregate_functions/test_aggregate_window_functions.groovy @@ -0,0 +1,522 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_aggregate_window_functions") { + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + + // approx_count_distinct + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + create table test_aggregate_window_functions ( + id bigint, + name varchar(20), + province varchar(20) + ) distributed by hash(id) properties('replication_num' = '1'); + """ + sql """ + insert into test_aggregate_window_functions values + (1, 'zhangsan', "sichuan"), + (4, 'zhangsan', "sichuan"), + (11, 'zhuge', "sichuan"), + (13, null, "sichuan"), + (2, 'lisi', "chongqing"), + (5, 'lisi2', "chongqing"), + (3, 'wangwu', "hubei"), + (6, 'wangwu2', "hubei"), + (12, "quyuan", 'hubei'), + (7, 'liuxiang', "beijing"), + (8, 'wangmang', "beijing"), + (9, 'liuxiang2', "beijing"), + (10, 'wangmang', "beijing"); + """ + order_qt_agg_window_approx_count_distinct "select province, approx_count_distinct(name) over(partition by province) from test_aggregate_window_functions;" + + // count_by_enum + order_qt_agg_window_count_by_enum "select province, count_by_enum(name) over(partition by province order by name) from test_aggregate_window_functions;" + + // avg_weighted + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + create table test_aggregate_window_functions ( + id bigint, + price decimalv3(38, 10), + count bigint + ) distributed by hash(id) properties('replication_num' = '1'); + """ + sql """ + insert into test_aggregate_window_functions values + (1, 123456789.0000000001, 100), + (1, 223456789.0000000004, 200), + (1, 323456789.0000000002, 200), + (1, 423456789.0000000005, 300), + (1, 523456789.0000000005, null), + (1, 523456789.0000000005, 1000), + (1, 523456789.0000000005, 2000), + (2, 123456789.0000000005, 400), + (2, 123456789.0000000006, 500), + (2, 223456789.0000000005, 300), + (2, 323456789.0000000005, 100), + (2, 423456789.0000000005, 1000), + (3, 123456789.1000000005, 200), + (3, 223456789.6000000005, 1000), + (3, 223456789.6000000005, null), + (3, 323456789.1000000005, 2000), + (3, 423456789.2000000005, 3000); + """ + order_qt_agg_window_avg_weighted "select id, avg_weighted(price, count) over(partition by id) from test_aggregate_window_functions;" + + // corr + order_qt_agg_window_corr "select id, corr(price, count) over(partition by id) from test_aggregate_window_functions;" + // covar_samp + order_qt_agg_window_covar_samp "select id, covar_samp(price, count) over(partition by id) from test_aggregate_window_functions;" + // covar_pop + order_qt_agg_window_covar_pop "select id, covar_pop(price, count) over(partition by id) from test_aggregate_window_functions;" + + // variance_pop + order_qt_agg_window_variance_pop "select id, variance_pop(price) over(partition by id) from test_aggregate_window_functions;" + // stddev_pop + order_qt_agg_window_stddev_pop "select id, stddev_pop(price) over(partition by id) from test_aggregate_window_functions;" + + // variance_samp + order_qt_agg_window_variance_samp "select id, variance_samp(price) over(partition by id) from test_aggregate_window_functions;" + // stddev_samp + order_qt_agg_window_stddev_samp "select id, stddev_samp(price) over(partition by id) from test_aggregate_window_functions;" + + // group_bit_or + order_qt_agg_window_group_bit_or "select id, group_bit_or(count) over(partition by id) from test_aggregate_window_functions;" + // group_bit_and + order_qt_agg_window_group_bit_and "select id, group_bit_and(count) over(partition by id) from 
test_aggregate_window_functions;" + // group_bit_xor + order_qt_agg_window_group_bit_xor "select id, group_bit_xor(count) over(partition by id) from test_aggregate_window_functions;" + + // bitmap_agg + order_qt_agg_window_bitmap_agg "select id, bitmap_to_string(bitmap_agg(count) over(partition by id)) from test_aggregate_window_functions;" + + // BITMAP_UNION_INT + order_qt_agg_window_bitmap_union_int "select id, bitmap_union_int(count) over(partition by id) from test_aggregate_window_functions;" + + // histogram + order_qt_agg_window_histogram "select id, histogram(count) over(partition by id) from test_aggregate_window_functions;" + + // max_by + order_qt_agg_window_max_by "select id, count, max_by(price, count) over(partition by id order by count) from test_aggregate_window_functions;" + // min_by + order_qt_agg_window_min_by "select id, count, min_by(price, count) over(partition by id order by count) from test_aggregate_window_functions;" + // any_value + order_qt_agg_window_any_value "select id, any_value(price) over(partition by id order by price) from test_aggregate_window_functions;" + + // percentile + order_qt_agg_window_percentile "select id, percentile(price, 0.95) over(partition by id) from test_aggregate_window_functions;" + // percentile_array + order_qt_agg_window_percentile_array "select id, percentile_array(price, array(0.25, 0.5, 0.75)) over(partition by id) from test_aggregate_window_functions;" + // percentile_approx + order_qt_agg_window_percentile_approx "select id, percentile_approx(price, 0.95) over(partition by id) from test_aggregate_window_functions;" + // percentile_approx_weighted + order_qt_agg_window_percentile_approx_weighted "select id, percentile_approx_weighted(price, count, 0.95) over(partition by id) from test_aggregate_window_functions;" + + // topn + order_qt_agg_window_topn "select id, topn(price, 3) over(partition by id) from test_aggregate_window_functions;" + // topn_weighted + order_qt_agg_window_topn_weighted "select id, topn_weighted(price, count, 3) over(partition by id) from test_aggregate_window_functions;" + // topn_array + order_qt_agg_window_topn_array "select id, topn_array(price, 3) over(partition by id) from test_aggregate_window_functions;" + + // multi_distinct_count + order_qt_agg_window_multi_distinct_count "select id, multi_distinct_count(price) over(partition by id) from test_aggregate_window_functions;" + + // multi_distinct_count_distribute_key, FE not implemented yet + // order_qt_agg_window_multi_distinct_count_distribute_key "select id, multi_distinct_distribute_key(id) over(partition by id) from test_aggregate_window_functions;" + // order_qt_agg_window_multi_distinct_count_distribute_key "select id, multi_distinct_count_distribute_key(price) over(partition by id) from test_aggregate_window_functions;" + + // multi_distinct_sum + order_qt_agg_window_multi_distinct_sum "select id, multi_distinct_sum(price) over(partition by id) from test_aggregate_window_functions;" + + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + create table test_aggregate_window_functions ( + id bigint, + id2 bigint, + user_ids varchar(64) + ) distributed by hash(id) properties('replication_num' = '1'); + """ + sql """ + insert into test_aggregate_window_functions values + (1, 1, '1,2'), + (1, 3, '1,2'), + (1, 2, '1,3'), + (1, 6, null), + (2, 2, '2,3'), + (2, 5, '2,3'), + (2, 9, '2,4'), + (3, 10, '3'), + (3, 1, '4'), + (3, 5, '5'), + (3, 9, '6'); + """ + // bitmap_union + order_qt_agg_window_bitmap_union "select id, 
bitmap_to_string(bitmap_union(bitmap_from_string(user_ids)) over(partition by id)) from test_aggregate_window_functions;" + // bitmap_intersect + order_qt_agg_window_bitmap_intersect "select id, bitmap_to_string(bitmap_intersect(bitmap_from_string(user_ids)) over(partition by id)) from test_aggregate_window_functions;" + // group_bitmap_xor + order_qt_agg_window_group_bitmap_xor "select id, bitmap_to_string(group_bitmap_xor(bitmap_from_string(user_ids)) over(partition by id)) from test_aggregate_window_functions;" + // bitmap_union_count + order_qt_agg_window_bitmap_union_count "select id, bitmap_union_count(bitmap_from_string(user_ids)) over(partition by id) from test_aggregate_window_functions;" + + // collect_list + order_qt_agg_window_collect_list "select id, collect_list(user_ids) over(partition by id order by user_ids) from test_aggregate_window_functions;" + // collect_set + order_qt_agg_window_collect_set "select id, collect_set(user_ids) over(partition by id order by user_ids) from test_aggregate_window_functions;" + // array_agg + order_qt_agg_window_array_agg "select id, array_agg(user_ids) over(partition by id order by user_ids) from test_aggregate_window_functions;" + + // group_concat + order_qt_agg_window_group_concat "select id, group_concat(user_ids) over(partition by id order by user_ids) from test_aggregate_window_functions;" + // group_concat distinct + // DISTINCT not allowed in analytic function: group_concat(line 1, pos 11) + // order_qt_agg_window_group_concat_distinct "select id, group_concat(distinct user_ids) over(partition by id) from test_aggregate_window_functions;" + // group_concat order by + // java.sql.SQLException: errCode = 2, detailMessage = Cannot invoke "org.apache.doris.analysis.Expr.getChildren()" because "root" is null + // order_qt_agg_window_group_concat_order_by "select id, group_concat(user_ids order by id2) over(partition by id) from test_aggregate_window_functions;" + // sum0 + order_qt_agg_window_sum0 "select id, sum0(id2) over(partition by id) from test_aggregate_window_functions;" + + // group_array_intersect + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + create table test_aggregate_window_functions ( + id bigint, + user_ids array + ) distributed by hash(id) properties('replication_num' = '1'); + """ + sql """ + insert into test_aggregate_window_functions values + (1, [1,2]), + (1, [1,2]), + (1, [1,3]), + (1, null), + (2, [2,3]), + (2, [2,3]), + (2, [2,4]), + (3, [3,4]), + (3, [4,3,null]), + (3, [5,3,4]), + (3, [3,6,4]); + """ + order_qt_agg_window_group_array_intersect "select id, array_sort(group_array_intersect(user_ids) over(partition by id)) from test_aggregate_window_functions;" + + // hll_union_agg + sql """drop TABLE if EXISTS test_window_func_hll;""" + sql """ + create table test_window_func_hll( + dt date, + id int, + name char(10), + province char(10), + os char(10), + pv hll hll_union + ) + Aggregate KEY (dt,id,name,province,os) + distributed by hash(id) buckets 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2" + ) + """ + sql """ + insert into test_window_func_hll + SELECT + dt,id,name,province,os,pv + from ( + SELECT '2022-05-05' as dt,'10001' as id,'test01' as name,'beijing' as province,'windows' as os,hll_hash('windows') as pv + union all + SELECT '2022-05-05' as dt,'10002' as id,'test01' as name,'beijing' as province,'linux' as os,hll_hash('linux') as pv + union all + SELECT '2022-05-05' as dt,'10003' as id,'test01' as name,'beijing' as 
province,'macos' as os,hll_hash('macos') as pv + union all + SELECT '2022-05-05' as dt,'10004' as id,'test01' as name,'hebei' as province,'windows' as os,hll_hash('windows') as pv + union all + SELECT '2022-05-06' as dt,'10001' as id,'test01' as name,'shanghai' as province,'windows' as os,hll_hash('windows') as pv + union all + SELECT '2022-05-06' as dt,'10002' as id,'test01' as name,'shanghai' as province,'linux' as os,hll_hash('linux') as pv + union all + SELECT '2022-05-06' as dt,'10003' as id,'test01' as name,'jiangsu' as province,'macos' as os,hll_hash('macos') as pv + union all + SELECT '2022-05-06' as dt,'10004' as id,'test01' as name,'shanxi' as province,'windows' as os,hll_hash('windows') as pv + union all + SELECT '2022-05-07' as dt,'10005' as id,'test01' as name,'shanxi' as province,'windows' as os,hll_empty() as pv + ) as a + """ + order_qt_window_func_hll_union_agg "select province, os, hll_union_agg(pv) over(partition by province) from test_window_func_hll;" + order_qt_window_func_hll_union "select province, os, hll_cardinality(hll_union(pv) over(partition by province)) from test_window_func_hll;" + + // map_agg + sql "DROP TABLE IF EXISTS `test_map_agg`;" + sql """ + CREATE TABLE IF NOT EXISTS `test_map_agg` ( + `id` int(11) NOT NULL, + `label_name` varchar(32) NOT NULL, + `value_field` string + ) + DISTRIBUTED BY HASH(`id`) + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + insert into `test_map_agg` values + (1, "LA", "V1_1"), + (1, "LB", "V1_2"), + (1, "LC", "V1_3"), + (2, "LA", "V2_1"), + (2, "LB", "V2_2"), + (2, "LC", "V2_3"), + (3, "LA", "V3_1"), + (3, "LB", "V3_2"), + (3, "LC", "V3_3"), + (4, "LA", "V4_1"), + (4, "LB", "V4_2"), + (4, "LC", "V4_3"), + (5, "LA", "V5_1"), + (5, "LB", "V5_2"), + (5, "LC", "V5_3"); + """ + order_qt_map_agg "select id, map_agg(label_name, value_field) over(partition by id) from test_map_agg;" + + // quantile_state + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + CREATE TABLE test_aggregate_window_functions ( + `dt` int(11) NULL COMMENT "", + `id` int(11) NULL COMMENT "", + `price` quantile_state QUANTILE_UNION NOT NULL COMMENT "" + ) ENGINE=OLAP + AGGREGATE KEY(`dt`, `id`) + DISTRIBUTED BY HASH(`dt`) + PROPERTIES ( + "replication_num" = "1" + ); + """ + sql """INSERT INTO test_aggregate_window_functions values(20220201,0, to_quantile_state(1, 2048))""" + sql """INSERT INTO test_aggregate_window_functions values(20220201,1, to_quantile_state(-1, 2048)), + (20220201,1, to_quantile_state(0, 2048)),(20220201,1, to_quantile_state(1, 2048)), + (20220201,1, to_quantile_state(2, 2048)),(20220201,1, to_quantile_state(3, 2048)) + """ + + // quantile_union + order_qt_agg_window_quantile_union """select dt, id, quantile_percent(quantile_union(price), 0.5) from test_aggregate_window_functions group by dt, id;""" + + // retention + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + CREATE TABLE test_aggregate_window_functions( + id TINYINT, + action STRING, + time DATETIME + ) DUPLICATE KEY(id) + DISTRIBUTED BY HASH(id) + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + INSERT INTO test_aggregate_window_functions VALUES + (1,'pv','2022-01-01 08:00:05'), + (2,'pv','2022-01-01 10:20:08'), + (1,'buy','2022-01-02 15:30:10'), + (2,'pv','2022-01-02 17:30:05'), + (3,'buy','2022-01-01 05:30:09'), + (3,'buy','2022-01-02 08:10:15'), + (4,'pv','2022-01-02 21:09:15'), + (5,'pv','2022-01-01 22:10:53'), + (5,'pv','2022-01-02 19:10:52'), + (5,'buy','2022-01-02 20:00:50'); + 
""" + order_qt_agg_window_retention_0 """ + select id, retention(action='pv' and to_date(time)='2022-01-01', + action='buy' and to_date(time)='2022-01-02') as retention + from test_aggregate_window_functions + group by id;""" + order_qt_agg_window_retention_1 """ + select id, retention(action='pv' and to_date(time)='2022-01-01', + action='buy' and to_date(time)='2022-01-02') over (partition by id) as retention + from test_aggregate_window_functions;""" + + // sequence_match and sequence_count + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + CREATE TABLE test_aggregate_window_functions( + `uid` int, + `date` datetime, + `number` int + ) DISTRIBUTED BY HASH(uid) + PROPERTIES ( + "replication_num" = "1" + ); + """ + sql """ + INSERT INTO test_aggregate_window_functions values + (1, '2022-11-01 10:41:00', 1), + (1, '2022-11-01 11:41:00', 5), + (1, '2022-11-01 12:41:00', 7), + (1, '2022-11-01 12:42:00', 9), + (1, '2022-11-01 12:52:00', 1), + (1, '2022-11-01 13:41:00', 4), + (1, '2022-11-01 13:51:00', 3), + (1, '2022-11-01 14:51:00', 5), + (2, '2022-11-01 20:41:00', 1), + (2, '2022-11-01 23:51:00', 3), + (2, '2022-11-01 22:41:00', 7), + (2, '2022-11-01 22:42:00', 9), + (2, '2022-11-01 23:41:00', 4); + """ + order_qt_agg_window_sequence_match "select uid, sequence_match('(?1)(?2)', date, number = 1, number = 5) over(partition by uid) from test_aggregate_window_functions;" + order_qt_agg_window_sequence_count "select uid, sequence_count('(?1)(?2)', date, number = 1, number = 5) over(partition by uid) from test_aggregate_window_functions;" + + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + CREATE TABLE test_aggregate_window_functions( + `id` INT(11) null COMMENT "", + `a` array null COMMENT "", + `b` array> null COMMENT "", + `s` array null COMMENT "" + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "storage_format" = "V2" + ); + """ + sql """ + insert into test_aggregate_window_functions values + (1,[1,2,3],[[1],[1,2,3],[2]],["ab","123","114514"]), + (2,[20],[[2]],["cd"]), + (3,[100],[[1]],["efg"]) , + (4,null,[null],null), + (5,[null,2],[[2],null],[null,'c']); + """ + // sum_foreach + order_qt_agg_window_sum_foreach "select id, sum_foreach(a) over(partition by id) from test_aggregate_window_functions;" + order_qt_agg_window_sum_foreach2 "select id, sum_foreach(a) over(order by id rows between 2 preceding and 1 preceding) from test_aggregate_window_functions;" + // covar_foreach + order_qt_agg_window_covar_foreach "select id, covar_foreach(a, a) over(partition by id) from test_aggregate_window_functions;" + + sql "drop table if exists test_aggregate_window_functions" + + sql """ + CREATE TABLE IF NOT EXISTS `test_aggregate_window_functions` ( + `kint` int(11) not null, + `kbint` int(11) not null, + `kstr` string not null, + `kstr2` string not null, + `kastr` array not null + ) engine=olap + DISTRIBUTED BY HASH(`kint`) BUCKETS 4 + properties("replication_num" = "1"); + """ + + sql """ + INSERT INTO `test_aggregate_window_functions` VALUES + ( 1, 1, 'string1', 'string3', ['s11', 's12', 's13'] ), + ( 1, 2, 'string2', 'string1', ['s21', 's22', 's23'] ), + ( 2, 3, 'string3', 'string2', ['s31', 's32', 's33'] ), + ( 1, 1, 'string1', 'string3', ['s11', 's12', 's13'] ), + ( 1, 2, 'string2', 'string1', ['s21', 's22', 's23'] ), + ( 2, 3, 'string3', 'string2', ['s31', 's32', 's33'] ); + """ + + order_qt_agg_window_group_concat_state1 "select 
kint, group_concat(kstr) over(partition by kint) from test_aggregate_window_functions;" + sql "select kint, group_concat_union(group_concat_state(kstr)) over(partition by kint) from test_aggregate_window_functions;" + order_qt_agg_window_group_concat_state_merge "select kint, group_concat_merge(group_concat_state(kstr)) over(partition by kint) from test_aggregate_window_functions;" + + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ CREATE TABLE IF NOT EXISTS test_aggregate_window_functions ( + tag_group bigint(20) NULL COMMENT "标签组", + bucket int(11) NOT NULL COMMENT "分桶字段", + members bitmap BITMAP_UNION COMMENT "人群") ENGINE=OLAP + AGGREGATE KEY(tag_group, bucket) + DISTRIBUTED BY HASH(bucket) BUCKETS 64 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1"); + """ + sql """ + insert into test_aggregate_window_functions values + (1, 1, bitmap_from_string('1,2,3,4')), + (2, 1, bitmap_from_string('1,2,3')), + (3, 1, bitmap_from_string('1,2')), + (1, 2, bitmap_from_string('2,3,4,5,6')), + (2, 2, bitmap_from_string('2,3,4')), + (3, 2, bitmap_from_string('2,3')); + """ + order_qt_agg_window_orthogonal_bitmap1 "select bucket, bitmap_to_string(orthogonal_bitmap_intersect(members, tag_group, 1, 2, 3) over(partition by bucket)) from test_aggregate_window_functions;" + order_qt_agg_window_orthogonal_bitmap2 "select bucket, orthogonal_bitmap_intersect_count(members, tag_group, 1, 2, 3) over(partition by bucket) from test_aggregate_window_functions;" + order_qt_agg_window_orthogonal_bitmap3 "select bucket, orthogonal_bitmap_union_count(members) over(partition by bucket) from test_aggregate_window_functions;" + + // window_funnel + sql """ + drop table if exists test_aggregate_window_functions; + """ + sql """ + CREATE TABLE test_aggregate_window_functions( + user_id BIGINT, + event_name VARCHAR(64), + event_timestamp datetime, + phone_brand varchar(64), + tab_num int + ) distributed by hash(event_timestamp) buckets 3 properties("replication_num"="1"); + """ + sql """ + INSERT INTO test_aggregate_window_functions VALUES + (100123, '登录', '2022-05-14 10:01:00', 'HONOR', 1), + (100123, '访问', '2022-05-14 10:02:00', 'HONOR', 2), + (100123, '登录2', '2022-05-14 10:03:00', 'HONOR', 3), + (100123, '下单', '2022-05-14 10:04:00', "HONOR", 4), + (100123, '付款', '2022-05-14 10:10:00', 'HONOR', 4), + (100125, '登录', '2022-05-15 11:00:00', 'XIAOMI', 1), + (100125, '访问', '2022-05-15 11:01:00', 'XIAOMI', 2), + (100125, '下单', '2022-05-15 11:02:00', 'XIAOMI', 6), + (100126, '登录', '2022-05-15 12:00:00', 'IPHONE', 1), + (100126, '访问', '2022-05-15 12:01:00', 'HONOR', 2), + (100127, '登录', '2022-05-15 11:30:00', 'VIVO', 1), + (100127, '访问', '2022-05-15 11:31:00', 'VIVO', 5); + """ + order_qt_agg_window_window_funnel """ + select user_id, window_funnel(3600, "fixed", event_timestamp, event_name = '登录', event_name = '访问', event_name = '下单', event_name = '付款') over(partition by user_id) from test_aggregate_window_functions; + """ + +} \ No newline at end of file diff --git a/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_function.groovy b/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_function.groovy index 0a986f249e563e..ae7489978397c7 100644 --- a/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_function.groovy +++ b/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_function.groovy @@ -248,6 +248,9 @@ suite("test_date_function") { sql """ truncate table ${tableName} """ 
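As context for the long-format cases added just below: date_format follows MySQL-style specifiers, and a NULL date or NULL format string propagates to a NULL result, which is what the new qt_sql_date_format_long and FROM_UNIXTIME(update_time, null) cases exercise. A minimal standalone illustration, not part of the suite, assuming MySQL-compatible behavior:

    -- %Y-%m-%d %H:%i:%s reproduces the stored datetime; %W/%M expand to names.
    SELECT date_format('2009-10-04 22:23:00', '%Y-%m-%d %H:%i:%s');  -- 2009-10-04 22:23:00
    SELECT date_format('2009-10-04 22:23:00', '%W %M %Y');           -- Sunday October 2009
    -- A NULL argument propagates to the result.
    SELECT date_format(NULL, '%Y-%m-%d');                            -- NULL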
sql """ insert into ${tableName} values ("2009-10-04 22:23:00") """ qt_sql """ select date_format(test_datetime, 'yyyy-MM-dd') from ${tableName}; """ + qt_sql_date_format_long """ select date_format(test_datetime, '%f %V %f %l %V %I %S %p %w %r %j %f %l %I %D %w %j %D %e %s %V %f %D %M %s %X %U %v %c %u %x %r %j %a %h %s %m %a %v %u %b') from ${tableName};""" + qt_sql_date_format_long """ select date_format(non_nullable(test_datetime), '%f %V %f %l %V %I %S %p %w %r %j %f %l %I %D %w %j %D %e %s %V %f %D %M %s %X %U %v %c %u %x %r %j %a %h %s %m %a %v %u %b') from ${tableName};""" + sql """ truncate table ${tableName} """ sql """ insert into ${tableName} values ("2010-11-30 23:59:59") """ @@ -465,7 +468,9 @@ suite("test_date_function") { qt_sql """ SELECT id,FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") FROM ${tableName} WHERE FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") <= '2022-08-01 00:00:00' ORDER BY id; """ qt_sql """ SELECT id,FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") FROM ${tableName} WHERE FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") LIKE '2022-08-01 00:00:00' ORDER BY id; """ qt_sql """ SELECT id,FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") FROM ${tableName} WHERE FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") = '2022-08-01 17:00:31' ORDER BY id; """ - + qt_sql """ SELECT id,FROM_UNIXTIME(update_time,null) FROM ${tableName} WHERE FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") = '2022-08-01 17:00:31' ORDER BY id; """ + qt_sql """ SELECT id,FROM_UNIXTIME(update_time,'%f %V %f %l %V %I %S %p %w %r %j %f %l %I %D %w %j %D %e %s %V %f %D %M %s %X %U %v %c %u %x %r %j %a %h %s %m %a %v %u %b') FROM ${tableName} WHERE FROM_UNIXTIME(update_time,"%Y-%m-%d %H:%i:%s") = '2022-08-01 17:00:31' ORDER BY id; """ + qt_sql """SELECT CURDATE() = CURRENT_DATE();""" qt_sql """SELECT unix_timestamp(CURDATE()) = unix_timestamp(CURRENT_DATE());""" @@ -475,6 +480,8 @@ suite("test_date_function") { qt_sql """ select date_format('2025-01-01', '%X %V'); """ qt_sql """ select date_format('2022-08-04', '%X %V %w'); """ qt_sql_date_format_long """ select date_format(cast('2011-06-24' as DATETIMEV2(0)), '%f %V %f %l %V %I %S %p %w %r %j %f %l %I %D %w %j %D %e %s %V %f %D %M %s %X %U %v %c %u %x %r %j %a %h %s %m %a %v %u %b') """ + qt_sql_date_format_long """ select date_format(null, '%f %V %f %l %V %I %S %p %w %r %j %f %l %I %D %w %j %D %e %s %V %f %D %M %s %X %U %v %c %u %x %r %j %a %h %s %m %a %v %u %b') """ + qt_sql """ select STR_TO_DATE('Tue Jul 12 20:00:45 CST 2022', '%a %b %e %H:%i:%s %Y'); """ qt_sql """ select STR_TO_DATE('Tue Jul 12 20:00:45 CST 2022', '%a %b %e %T CST %Y'); """ qt_sql """ select STR_TO_DATE('2018-4-2 15:3:28','%Y-%m-%d %H:%i:%s'); """ diff --git a/regression-test/suites/nereids_p0/sql_functions/math_functions/test_conv.groovy b/regression-test/suites/nereids_p0/sql_functions/math_functions/test_conv.groovy index 214e65ff4bdb16..b46aee62ba3928 100644 --- a/regression-test/suites/nereids_p0/sql_functions/math_functions/test_conv.groovy +++ b/regression-test/suites/nereids_p0/sql_functions/math_functions/test_conv.groovy @@ -23,5 +23,15 @@ suite("test_conv") { qt_select3 "select conv('-ff', 24, 2);" // if beyond the max value of uint64, use max_uint64 as res qt_select4 "select conv('fffffffffffffffffffffffffffffffff', 24, 10);" + + sql """DROP TABLE IF EXISTS `test_tb`; """ + sql """ create table test_tb(int_1 int, float_2 float) PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ insert into test_tb values(1, 1.464868); """ + + qt_select5 """ select 
conv(float_2,10,2),float_2 from test_tb; """ } diff --git a/regression-test/suites/nereids_p0/sql_functions/string_functions/test_split_part.groovy b/regression-test/suites/nereids_p0/sql_functions/string_functions/test_split_part.groovy index 76a8e4af13f0c1..c93349e3ceac75 100644 --- a/regression-test/suites/nereids_p0/sql_functions/string_functions/test_split_part.groovy +++ b/regression-test/suites/nereids_p0/sql_functions/string_functions/test_split_part.groovy @@ -30,4 +30,9 @@ suite("test_split_part") { } qt_1 "select split_part(k8, '1', 1), k8, split_part(concat(k8, '12'), '1', 1) from nereids_test_query_db.test order by k8 limit 2;" + + explain { + sql("""verbose select split_part(replace_empty('we', "", "u"), ":", 5 - 5);""") + notContains "null_type" + } } \ No newline at end of file diff --git a/regression-test/suites/nereids_p0/sql_functions/string_functions/test_string_function.groovy b/regression-test/suites/nereids_p0/sql_functions/string_functions/test_string_function.groovy index 20c8294b1144ef..6e9cd947bc2ed5 100644 --- a/regression-test/suites/nereids_p0/sql_functions/string_functions/test_string_function.groovy +++ b/regression-test/suites/nereids_p0/sql_functions/string_functions/test_string_function.groovy @@ -191,4 +191,27 @@ suite("test_string_function") { qt_sql "select sub_replace(\"this is origin str\",\"NEW-STR\",1);" qt_sql "select sub_replace(\"doris\",\"***\",1,2);" + sql """ set debug_skip_fold_constant = true;""" + qt_sub_replace_utf8_sql1 " select sub_replace('你好世界','a',1);" + qt_sub_replace_utf8_sql2 " select sub_replace('你好世界','ab',1);" + qt_sub_replace_utf8_sql3 " select sub_replace('你好世界','ab',1,20);" + qt_sub_replace_utf8_sql4 " select sub_replace('你好世界','abcd我',1,2);" + qt_sub_replace_utf8_sql5 " select sub_replace('你好世界','a',6);" + qt_sub_replace_utf8_sql6 " select sub_replace('你好世界','大家',0);" + qt_sub_replace_utf8_sql7 " select sub_replace('你好世界','大家114514',1,20);" + qt_sub_replace_utf8_sql8 " select sub_replace('你好世界','大家114514',6,20);" + qt_sub_replace_utf8_sql9 " select sub_replace('你好世界','大家',4);" + qt_sub_replace_utf8_sql10 " select sub_replace('你好世界','大家',-1);" + sql """ set debug_skip_fold_constant = false;""" + qt_sub_replace_utf8_sql1 " select sub_replace('你好世界','a',1);" + qt_sub_replace_utf8_sql2 " select sub_replace('你好世界','ab',1);" + qt_sub_replace_utf8_sql3 " select sub_replace('你好世界','ab',1,20);" + qt_sub_replace_utf8_sql4 " select sub_replace('你好世界','abcd我',1,2);" + qt_sub_replace_utf8_sql5 " select sub_replace('你好世界','a',6);" + qt_sub_replace_utf8_sql6 " select sub_replace('你好世界','大家',0);" + qt_sub_replace_utf8_sql7 " select sub_replace('你好世界','大家114514',1,20);" + qt_sub_replace_utf8_sql8 " select sub_replace('你好世界','大家114514',6,20);" + qt_sub_replace_utf8_sql9 " select sub_replace('你好世界','大家',4);" + qt_sub_replace_utf8_sql10 " select sub_replace('你好世界','大家',-1);" + } diff --git a/regression-test/suites/nereids_p0/subquery/correlated_scalar_subquery.groovy b/regression-test/suites/nereids_p0/subquery/correlated_scalar_subquery.groovy new file mode 100644 index 00000000000000..80d9cdb4bb2322 --- /dev/null +++ b/regression-test/suites/nereids_p0/subquery/correlated_scalar_subquery.groovy @@ -0,0 +1,223 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("correlated_scalar_subquery") { + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + sql """ + drop table if exists correlated_scalar_t1; + """ + sql """ + drop table if exists correlated_scalar_t2; + """ + + sql """ + drop table if exists correlated_scalar_t3; + """ + + sql """ + create table correlated_scalar_t1 + (c1 bigint, c2 bigint) + ENGINE=OLAP + DUPLICATE KEY(c1, c2) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(c1) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ); + """ + sql """ + create table correlated_scalar_t2 + (c1 bigint, c2 bigint) + ENGINE=OLAP + DUPLICATE KEY(c1, c2) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(c1) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + create table correlated_scalar_t3 + (c1 bigint, c2 bigint) + ENGINE=OLAP + DUPLICATE KEY(c1, c2) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(c1) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + insert into correlated_scalar_t1 values (1,null),(null,1),(1,2), (null,2),(1,3), (2,4), (2,5), (3,3), (3,4), (20,2), (22,3), (24,4),(null,null); + """ + sql """ + insert into correlated_scalar_t2 values (1,null),(null,1),(1,4), (1,2), (null,3), (2,4), (3,7), (3,9),(null,null),(5,1); + """ + sql """ + insert into correlated_scalar_t3 values (1,null),(null,1),(1,9), (1,8), (null,7), (2,6), (3,7), (3,9),(null,null),(5,1); + """ + + qt_select_where1 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 and correlated_scalar_t2.c2 < 4) order by c1;""" + qt_select_where2 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select any_value(c1) from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 and correlated_scalar_t2.c2 < 4) order by c1;""" + qt_select_where3 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select e1 from (select 1 k1) as t lateral view explode_numbers(5) tmp1 as e1 where correlated_scalar_t1.c1 = e1 and correlated_scalar_t1.c2 = e1 order by e1) order by c1;""" + qt_select_where4 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select col from (select c1 col from correlated_scalar_t2 group by c1 ) tt where correlated_scalar_t1.c1 = tt.col) order by c1;""" + qt_select_where5 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select col from (select max(c1) over() col from correlated_scalar_t2 ) tt where correlated_scalar_t1.c1 = tt.col) order by c1;""" + qt_select_where6 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select min(correlated_scalar_t2.c1) from correlated_scalar_t2 join correlated_scalar_t3 on correlated_scalar_t2.c1 = correlated_scalar_t3.c2 where correlated_scalar_t2.c2 = correlated_scalar_t1.c1) order by c1;""" + qt_select_where7 """select c1 from correlated_scalar_t1 where 
correlated_scalar_t1.c2 > (select x from (select count(c1)x from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 order by count(c1))tt) order by c1;""" + qt_select_where8 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(col) from (select c1 col from correlated_scalar_t2 group by c1 ) tt where correlated_scalar_t1.c1 = tt.col) order by c1;""" + qt_select_where9 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(col) from (select max(c1) over() col from correlated_scalar_t2) tt where correlated_scalar_t1.c1 = tt.col) order by c1;""" + qt_select_where10 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(correlated_scalar_t2.c1) from correlated_scalar_t2 join correlated_scalar_t3 on correlated_scalar_t2.c1 = correlated_scalar_t3.c2 where correlated_scalar_t2.c2 = correlated_scalar_t1.c1) order by c1;""" + qt_select_where11 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(c1) from correlated_scalar_t2 having correlated_scalar_t1.c1 = count(c1)) order by c1;""" + + qt_select_project1 """select c1, sum((select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 and correlated_scalar_t2.c2 > 7)) from correlated_scalar_t1 group by c1 order by c1;""" + qt_select_project2 """select c1, sum((select any_value(c1) from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 and correlated_scalar_t2.c2 > 7)) from correlated_scalar_t1 group by c1 order by c1;""" + + qt_select_join1 """select correlated_scalar_t1.* from correlated_scalar_t1 join correlated_scalar_t2 on correlated_scalar_t1.c1 = correlated_scalar_t2.c2 and correlated_scalar_t1.c2 > (select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 and correlated_scalar_t2.c2 > 7);""" + qt_select_join2 """select correlated_scalar_t1.* from correlated_scalar_t1 join correlated_scalar_t2 on correlated_scalar_t1.c1 = correlated_scalar_t2.c2 and correlated_scalar_t1.c2 > (select any_value(c1) from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 and correlated_scalar_t2.c2 > 7);""" + + qt_select_having1 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select correlated_scalar_t2.c1 from correlated_scalar_t2 where correlated_scalar_t2.c2 < 4 having correlated_scalar_t1.c1 = correlated_scalar_t2.c1);""" + qt_select_having2 """select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select any_value(correlated_scalar_t2.c1) from correlated_scalar_t2 where correlated_scalar_t2.c2 < 4 having correlated_scalar_t1.c1 = any_value(correlated_scalar_t2.c1));""" + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1); + """ + exception "correlate scalar subquery must return only 1 row" + } + + test { + sql """ + select c1, sum((select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1)) from correlated_scalar_t1 group by c1 order by c1; + """ + exception "correlate scalar subquery must return only 1 row" + } + + test { + sql """ + select correlated_scalar_t1.* from correlated_scalar_t1 join correlated_scalar_t2 on correlated_scalar_t1.c1 = correlated_scalar_t2.c2 and correlated_scalar_t1.c2 > (select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1); + 
""" + exception "correlate scalar subquery must return only 1 row" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select c1 from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 limit 2); + """ + exception "limit is not supported in correlated subquery" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select e1 from (select k1 from (select 1 k1 ) as t where correlated_scalar_t1.c1 = k1 ) tt lateral view explode_numbers(5) tmp1 as e1 order by e1); + """ + exception "access outer query's column before lateral view is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select e1 from (select 1 k1) as t lateral view explode_numbers(5) tmp1 as e1 where correlated_scalar_t1.c1 = e1 having correlated_scalar_t1.c2 = e1 order by e1); + """ + exception "access outer query's column in two places is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select e1 from (select 1 k1) as t lateral view explode_numbers(5) tmp1 as e1 where correlated_scalar_t1.c1 = e1 or correlated_scalar_t1.c2 = e1 order by e1); + """ + exception "Unsupported correlated subquery with correlated predicate" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select correlated_scalar_t1.c1 from correlated_scalar_t2); + """ + exception "access outer query's column in project is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select max(c1) over() from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 order by c1); + """ + exception "access outer query's column before window function is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select max(correlated_scalar_t1.c1) over() from correlated_scalar_t2 order by c1); + """ + exception "access outer query's column in project is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select min(correlated_scalar_t2.c1) from correlated_scalar_t2 join (select correlated_scalar_t3.c1 from correlated_scalar_t3 where correlated_scalar_t1.c1 = correlated_scalar_t3.c2 ) tt on correlated_scalar_t2.c2 > tt.c1); + """ + exception "access outer query's column before join is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select correlated_scalar_t2.c1 from correlated_scalar_t2 join correlated_scalar_t3 on correlated_scalar_t1.c1 = correlated_scalar_t3.c2 ); + """ + exception "access outer query's column in join is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select c1 from correlated_scalar_t2 order by correlated_scalar_t1.c1); + """ + exception "Unknown column" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select c1 from (select c1 from correlated_scalar_t2 order by correlated_scalar_t1.c1)tt ); + """ + exception "Unknown column" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(c1) from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 group by c2); + """ + exception "access outer query's column before agg with group by is not supported" + } + + 
test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(c1) from correlated_scalar_t2 where correlated_scalar_t1.c1 = correlated_scalar_t2.c1 having count(c1) > 10); + """ + exception "only project, sort and subquery alias node is allowed after agg node" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(correlated_scalar_t1.c1) from correlated_scalar_t2); + """ + exception "access outer query's column in aggregate is not supported" + } + + test { + sql """ + select c1 from correlated_scalar_t1 where correlated_scalar_t1.c2 > (select count(col) from (select max(c1) col from correlated_scalar_t2 where correlated_scalar_t1.c1 = c1) tt ); + """ + exception "access outer query's column before two agg nodes is not supported" + } +} \ No newline at end of file diff --git a/regression-test/suites/nereids_p0/subquery/test_subquery.groovy b/regression-test/suites/nereids_p0/subquery/test_subquery.groovy index c8121d03b312b5..82b858cf10be78 100644 --- a/regression-test/suites/nereids_p0/subquery/test_subquery.groovy +++ b/regression-test/suites/nereids_p0/subquery/test_subquery.groovy @@ -295,4 +295,6 @@ suite("test_subquery") { contains("partitions=3/") } sql """drop table if exists scalar_subquery_t""" + + sql """select e1 from (select 1) t lateral view explode((select sequence(CURRENT_DATE(), date_add(CURRENT_DATE(), interval 2 day)))) t2 as e1;""" } diff --git a/regression-test/suites/nereids_p0/system/test_query_sys.groovy b/regression-test/suites/nereids_p0/system/test_query_sys.groovy index e0e68f909fd94f..81b41fe76cff0b 100644 --- a/regression-test/suites/nereids_p0/system/test_query_sys.groovy +++ b/regression-test/suites/nereids_p0/system/test_query_sys.groovy @@ -41,20 +41,12 @@ suite("test_query_sys", "query,p0") { // INFORMATION_SCHEMA sql "SELECT table_name FROM INFORMATION_SCHEMA.TABLES where table_schema=\"nereids_test_query_db\" and TABLE_TYPE = \"BASE TABLE\" order by table_name" sql "SELECT COLUMN_NAME, DATA_TYPE, IS_NULLABLE, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = \"${tableName}\" AND table_schema =\"nereids_test_query_db\" AND column_name LIKE \"k%\"" - - // test version() - sql "set enable_nereids_planner=false" - def v1 = sql "select version()" - sql "set enable_nereids_planner=true" - def v2 = sql "select version()" - assertEquals(v1, v2) test { sql "select random(random());" exception "The param of rand function must be literal" } - sql "set enable_nereids_planner=false" sql """ CREATE TABLE IF NOT EXISTS `test_random` ( fcst_emp varchar(128) NOT NULL diff --git a/regression-test/suites/nereids_rules_p0/cte/test_cte_filter_pushdown.groovy b/regression-test/suites/nereids_rules_p0/cte/test_cte_filter_pushdown.groovy index 6ed56dfaa313af..258e220a4a65d0 100644 --- a/regression-test/suites/nereids_rules_p0/cte/test_cte_filter_pushdown.groovy +++ b/regression-test/suites/nereids_rules_p0/cte/test_cte_filter_pushdown.groovy @@ -18,6 +18,7 @@ suite("test_cte_filter_pushdown") { sql "SET enable_nereids_planner=true" sql "SET enable_pipeline_engine=true" sql "SET enable_fallback_to_original_planner=false" + sql "set runtime_filter_type=2;" sql "set ignore_shape_nodes='PhysicalDistribute, PhysicalProject'" sql "set disable_nereids_rules=PRUNE_EMPTY_PARTITION" diff --git a/regression-test/suites/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.groovy b/regression-test/suites/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.groovy index 
bbf3dbbe8ee2de..fc38e3be3372f0 100644 --- a/regression-test/suites/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.groovy +++ b/regression-test/suites/nereids_rules_p0/eliminate_outer_join/eliminate_outer_join.groovy @@ -24,6 +24,7 @@ suite("eliminate_outer_join") { sql "set disable_nereids_rules=PRUNE_EMPTY_PARTITION" sql 'set be_number_for_test=3' sql "set enable_parallel_result_sink=false;" + sql "set disable_join_reorder=true;" sql """ DROP TABLE IF EXISTS t diff --git a/regression-test/suites/nereids_rules_p0/grouping_sets/valid_grouping.groovy b/regression-test/suites/nereids_rules_p0/grouping_sets/valid_grouping.groovy deleted file mode 100644 index 624fc7e9f159cc..00000000000000 --- a/regression-test/suites/nereids_rules_p0/grouping_sets/valid_grouping.groovy +++ /dev/null @@ -1,54 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("valid_grouping"){ - - // this suite test legacy planner - sql "set enable_nereids_planner=false" - - sql "drop table if exists valid_grouping" - sql """ - CREATE TABLE `valid_grouping` ( - `a` INT NULL, - `b` VARCHAR(10) NULL, - `c` INT NULL, - `d` INT NULL - ) ENGINE=OLAP - DUPLICATE KEY(`a`, `b`) - DISTRIBUTED BY RANDOM BUCKETS AUTO - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ); - """ - sql "insert into valid_grouping values(1,'d2',3,5);" - test { - sql """select - b, 'day' as DT_TYPE - from valid_grouping - group by grouping sets ( (grouping_id(b)),(b));""" - exception("GROUP BY expression must not contain grouping scalar functions: grouping_id(`b`)") - } - - test { - sql """select - b, 'day' as DT_TYPE - from valid_grouping - group by grouping sets ( (grouping(b)),(b));""" - exception("GROUP BY expression must not contain grouping scalar functions: grouping(`b`)") - } - -} \ No newline at end of file diff --git a/regression-test/suites/nereids_rules_p0/mv/availability/materialized_view_switch.groovy b/regression-test/suites/nereids_rules_p0/mv/availability/materialized_view_switch.groovy index 1012d84434ef84..97d9325d959ddc 100644 --- a/regression-test/suites/nereids_rules_p0/mv/availability/materialized_view_switch.groovy +++ b/regression-test/suites/nereids_rules_p0/mv/availability/materialized_view_switch.groovy @@ -151,8 +151,10 @@ suite("materialized_view_switch") { sql """ DROP MATERIALIZED VIEW IF EXISTS mv_name_1""" sql "SET enable_materialized_view_rewrite=false" - async_mv_rewrite_fail(db, mv_name, query, "mv_name_2") + create_async_mv(db, "mv_name_2", mv_name) + mv_not_part_in(query, "mv_name_2") sql """ DROP MATERIALIZED VIEW IF EXISTS mv_name_2""" + sql "SET enable_materialized_view_rewrite=true" async_mv_rewrite_success(db, mv_name, query, "mv_name_3") sql """ DROP MATERIALIZED VIEW IF EXISTS mv_name_3""" diff --git 
a/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy b/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy index 8db90bc40ebaa3..2cc50eafd378fe 100644 --- a/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy +++ b/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy @@ -533,7 +533,7 @@ suite("partition_mv_rewrite_dimension_self_conn") { // predicate compensate def predicate_mv_stmt_1 = """ - select t1.l_shipdatE, t2.l_shipdate, t1.l_partkey + select t1.l_shipdatE, t2.l_shipdate as l_shipdate_t2, t1.l_partkey from lineitem_self_conn as t1 inner join lineitem_self_conn as t2 on t1.l_orderkey = t2.l_orderkey diff --git a/regression-test/suites/nereids_rules_p0/mv/union_all_compensate/union_all_compensate.groovy b/regression-test/suites/nereids_rules_p0/mv/union_all_compensate/union_all_compensate.groovy new file mode 100644 index 00000000000000..dbab81ee22a51b --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/union_all_compensate/union_all_compensate.groovy @@ -0,0 +1,345 @@ +package mv.union_all_compensate +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
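The union_all_compensate suite that follows checks whether a partially stale async materialized view can still answer a query. When the query's grouping pins down the MV's partition column and the stale partitions are outside grace_period, the planner may serve fresh partitions from the MV and recompute stale ones from the base tables, gluing the two with a UNION ALL; otherwise it either uses the MV as-is (within grace_period) or falls back entirely. A rough sketch of the compensated shape, with a hypothetical split point and the suite's own tables:

    -- Fresh partitions answered from the MV...
    SELECT dt, slot_id, num_sum
    FROM test_agg_mv
    WHERE dt < '2024-09-11'
    UNION ALL
    -- ...stale partitions recomputed from the base tables.
    SELECT to_date(t1.data_date), t2.slot_id, sum(t1.num)
    FROM test_table1 t1
    JOIN test_table2 t2 ON t1.data_date = t2.data_date
    WHERE to_date(t1.data_date) >= '2024-09-11'
    GROUP BY to_date(t1.data_date), t2.slot_id;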
+ +suite("union_all_compensate") { + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "set runtime_filter_mode=OFF"; + sql "SET ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" + + sql """ + drop table if exists test_table1 + """ + sql """ + CREATE TABLE `test_table1` ( + `data_date` datetime NOT NULL COMMENT '', + `slot_id` varchar(255) NULL, + `num` int NULL + ) ENGINE = OLAP DUPLICATE KEY( + `data_date`, + `slot_id` + ) PARTITION BY RANGE(`data_date`) ( + FROM ("2024-09-01") TO ("2024-09-30") INTERVAL 1 DAY + ) + DISTRIBUTED BY HASH (`data_date`, `slot_id`) BUCKETS 10 + PROPERTIES ( + "file_cache_ttl_seconds" = "0", + "is_being_synced" = "false", + "storage_medium" = "hdd", "storage_format" = "V2", + "inverted_index_storage_format" = "V2", + "light_schema_change" = "true", "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "group_commit_interval_ms" = "10000", + "group_commit_data_bytes" = "134217728", + 'replication_num' = '1' + ); + """ + + sql """ + drop table if exists test_table2 + """ + sql """ + CREATE TABLE `test_table2` ( + `data_date` datetime NOT NULL COMMENT '', + `slot_id` varchar(255) NULL, + `num` int NULL + ) ENGINE = OLAP DUPLICATE KEY( + `data_date`, + `slot_id` + ) PARTITION BY RANGE(`data_date`) ( + FROM ("2024-09-01") TO ("2024-09-30") INTERVAL 1 DAY + ) + DISTRIBUTED BY HASH (`data_date`, `slot_id`) BUCKETS 10 + PROPERTIES ( + "file_cache_ttl_seconds" = "0", "is_being_synced" = "false", + "storage_medium" = "hdd", "storage_format" = "V2", + "inverted_index_storage_format" = "V2", + "light_schema_change" = "true", "disable_auto_compaction" = "false", + "enable_single_replica_compaction" = "false", + "group_commit_interval_ms" = "10000", + "group_commit_data_bytes" = "134217728", + 'replication_num' = '1' + ); + """ + + sql """ + insert into test_table1 values + ('2024-09-11 00:10:00', 'a', 1), + ('2024-09-11 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'b', 1), + ('2024-09-13 00:20:00', 'b', 1), + ('2024-09-13 00:30:00', 'b', 1), + ('2024-09-13 00:20:00', 'b', 1), + ('2024-09-13 00:30:00', 'b', 1), + ('2024-09-14 00:20:00', 'b', 1), + ('2024-09-14 00:30:00', 'b', 1), + ('2024-09-14 00:20:00', 'b', 1), + ('2024-09-14 00:30:00', 'b', 1), + ('2024-09-15 00:20:00', 'b', 1), + ('2024-09-15 00:30:00', 'b', 1), + ('2024-09-15 00:20:00', 'b', 1), + ('2024-09-15 00:30:00', 'b', 1); + """ + + sql """ + insert into test_table2 values + ('2024-09-11 00:10:00', 'a', 1), + ('2024-09-11 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'b', 1), + ('2024-09-13 00:20:00', 'b', 1), + ('2024-09-13 00:30:00', 'b', 1), + ('2024-09-13 00:20:00', 'b', 1), + ('2024-09-13 00:30:00', 'b', 1), + ('2024-09-14 00:20:00', 'b', 1), + ('2024-09-14 00:30:00', 'b', 1), + ('2024-09-14 00:20:00', 'b', 1), + ('2024-09-14 00:30:00', 'b', 1), + ('2024-09-15 00:20:00', 'b', 1), + ('2024-09-15 00:30:00', 'b', 1), + ('2024-09-15 00:20:00', 'b', 1), + ('2024-09-15 00:30:00', 'b', 1); + """ + + sql """analyze table test_table1 with sync""" + sql """analyze table test_table2 with sync""" + + // Aggregate, scalar aggregate, should not compensate union all + sql """ DROP MATERIALIZED VIEW IF EXISTS test_agg_mv""" + sql""" + CREATE MATERIALIZED VIEW test_agg_mv + BUILD IMMEDIATE REFRESH ON MANUAL + partition by(data_date) + DISTRIBUTED BY HASH(data_date) BUCKETS 3 + PROPERTIES( + "refresh_partition_num" = "1", 'replication_num' = '1' + ) + AS + SELECT + 
date_trunc(t1.data_date, 'day') as data_date, + to_date(t1.data_date) as dt, + t2.slot_id, + sum(t1.num) num_sum + FROM + test_table1 t1 + inner join + test_table2 t2 on t1.data_date = t2.data_date + GROUP BY + date_trunc(t1.data_date, 'day'), + to_date(t1.data_date), + t2.slot_id; + """ + waitingMTMVTaskFinishedByMvName("test_agg_mv") + sql """analyze table test_agg_mv with sync""" + + def query1_0 = + """ + select sum(t1.num) + FROM + test_table1 t1 + inner join + test_table2 t2 on t1.data_date = t2.data_date + where to_date(t1.data_date) >= '2024-09-12'; + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query1_0_before "${query1_0}" + sql """set enable_materialized_view_rewrite = true;""" + mv_rewrite_success(query1_0, "test_agg_mv") + order_qt_query1_0_after "${query1_0}" + + // Data modify + sql """ + insert into test_table1 values + ('2024-09-11 00:10:00', 'a', 1), + ('2024-09-11 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'b', 1); + """ + sql """analyze table test_table1 with sync""" + + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query1_1_before "${query1_0}" + sql """set enable_materialized_view_rewrite = true;""" + mv_rewrite_fail(query1_0, "test_agg_mv") + order_qt_query1_1_after "${query1_0}" + + + // Aggregate, if query group by expression doesn't use the partition column, but the invalid partition is in the + // grace_period, should not compensate union all, but should rewritten successfully + def query2_0 = + """ + select t2.slot_id, + sum(t1.num) + FROM + test_table1 t1 + inner join + test_table2 t2 on t1.data_date = t2.data_date + where to_date(t1.data_date) >= '2024-09-12' + group by t2.slot_id; + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query2_0_before "${query2_0}" + sql """set enable_materialized_view_rewrite = true;""" + sql """ALTER MATERIALIZED VIEW test_agg_mv set("grace_period"="100000");""" + mv_rewrite_success(query2_0, "test_agg_mv") + order_qt_query2_0_after "${query2_0}" + + + // Aggregate, if query group by expression doesn't use the partition column, and the invalid partition is not in the + // grace_period, should not compensate union all, and should rewritten fail + def query3_0 = + """ + select t2.slot_id, + sum(t1.num) + FROM + test_table1 t1 + inner join + test_table2 t2 on t1.data_date = t2.data_date + where to_date(t1.data_date) >= '2024-09-12' + group by t2.slot_id; + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query3_0_before "${query2_0}" + sql """set enable_materialized_view_rewrite = true;""" + sql """ALTER MATERIALIZED VIEW test_agg_mv set("grace_period"="0");""" + mv_rewrite_fail(query2_0, "test_agg_mv") + order_qt_query3_0_after "${query2_0}" + + + // Aggregate, if query group by expression use the partition column, but the invalid partition is in the + // grace_period, should not compensate union all but should rewritten successfully + def query4_0 = + """ + select to_date(t1.data_date), + sum(t1.num) + FROM + test_table1 t1 + inner join + test_table2 t2 on t1.data_date = t2.data_date + where to_date(t1.data_date) >= '2024-09-12' + group by + to_date(t1.data_date); + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query4_0_before "${query4_0}" + sql """set enable_materialized_view_rewrite = true;""" + sql """ALTER MATERIALIZED VIEW test_agg_mv set("grace_period"="100000");""" + mv_rewrite_success(query4_0, "test_agg_mv") + order_qt_query4_0_after "${query4_0}" + + + // 
Aggregate, if query group by expression use the partition column, and the invalid partition is not in the + // grace_period, should compensate union all, and should rewritten successfully + def query5_0 = + """ + select to_date(t1.data_date), + sum(t1.num) + FROM + test_table1 t1 + inner join + test_table2 t2 on t1.data_date = t2.data_date + where to_date(t1.data_date) >= '2024-09-12' + group by + to_date(t1.data_date); + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query5_0_before "${query4_0}" + sql """ALTER MATERIALIZED VIEW test_agg_mv set("grace_period"="0");""" + sql """set enable_materialized_view_rewrite = true;""" + mv_rewrite_success(query4_0, "test_agg_mv") + order_qt_query5_0_after "${query4_0}" + sql """ DROP MATERIALIZED VIEW IF EXISTS test_agg_mv""" + + + sql """ DROP MATERIALIZED VIEW IF EXISTS test_join_mv""" + sql """ + CREATE MATERIALIZED VIEW test_join_mv + BUILD IMMEDIATE REFRESH ON MANUAL + partition by(data_date) + DISTRIBUTED BY HASH(data_date) BUCKETS 3 + PROPERTIES( + "refresh_partition_num" = "1", + 'replication_num' = '1' + ) + AS + SELECT + date_trunc(t3.data_date, 'day') as data_date, + to_date(t3.data_date) as dt, + t4.slot_id, + t3.num + FROM + test_table1 t3 + left join + test_table2 t4 on t3.data_date = t4.data_date + """ + waitingMTMVTaskFinishedByMvName("test_join_mv") + sql """analyze table test_table1 with sync""" + + // Data modify + sql """ + insert into test_table1 values + ('2024-09-11 00:10:00', 'a', 1), + ('2024-09-11 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'a', 1), + ('2024-09-12 00:20:00', 'b', 1); + """ + sql """analyze table test_join_mv with sync""" + + // Join, if select expression not use the partition column, and the invalid partition is not in the + // grace_period, should union all,and should rewritten successfully + def query6_0 = + """ + select + t4.slot_id, + t3.num + FROM + test_table1 t3 + left join + test_table2 t4 on t3.data_date = t4.data_date + where to_date(t3.data_date) >= '2024-09-12'; + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query6_0_before "${query6_0}" + sql """set enable_materialized_view_rewrite = true;""" + mv_rewrite_success(query6_0, "test_join_mv") + order_qt_query6_0_after "${query6_0}" + + + // Join, if select expression not use the partition column, and the invalid partition is in the + // grace_period, should not compensate union all, and should rewritten successfully + def query7_0 = + """ + select + t4.slot_id, + t3.num + FROM + test_table1 t3 + left join + test_table2 t4 on t3.data_date = t4.data_date + where to_date(t3.data_date) >= '2024-09-12'; + """ + sql """set enable_materialized_view_rewrite = false;""" + order_qt_query7_0_before "${query7_0}" + sql """set enable_materialized_view_rewrite = true;""" + sql """ALTER MATERIALIZED VIEW test_join_mv set("grace_period"="100000");""" + mv_rewrite_success(query7_0, "test_join_mv") + order_qt_query7_0_after "${query7_0}" + sql """ DROP MATERIALIZED VIEW IF EXISTS test_join_mv""" + +} diff --git a/regression-test/suites/nereids_rules_p0/mv/union_rewrite/usercase_union_rewrite.groovy b/regression-test/suites/nereids_rules_p0/mv/union_rewrite/usercase_union_rewrite.groovy index c076d13166cec0..d41f36627aa052 100644 --- a/regression-test/suites/nereids_rules_p0/mv/union_rewrite/usercase_union_rewrite.groovy +++ b/regression-test/suites/nereids_rules_p0/mv/union_rewrite/usercase_union_rewrite.groovy @@ -121,7 +121,7 @@ suite ("usercase_union_rewrite") { } def mv_name = "mv_usercase" - def mv_stmt = 
"""select o_orderdatE, o_shippriority, o_comment, o_orderdate, + def mv_stmt = """select o_orderdatE, o_shippriority, o_comment, o_orderdate as o_orderdate_alias, sum(o_totalprice) as sum_total, max(o_totalpricE) as max_total, min(o_totalprice) as min_total, @@ -139,7 +139,7 @@ suite ("usercase_union_rewrite") { def job_name_1 = getJobName(db, mv_name) waitingMTMVTaskFinished(job_name_1) - def query_stmt = """select o_orderdatE, o_shippriority, o_comment, o_orderdate, + def query_stmt = """select o_orderdatE, o_shippriority, o_comment, o_orderdate as o_orderdate_alias, sum(o_totalprice) as sum_total, max(o_totalpricE) as max_total, min(o_totalprice) as min_total, diff --git a/regression-test/suites/nereids_rules_p0/mv/with_auth/with_select_table_auth.groovy b/regression-test/suites/nereids_rules_p0/mv/with_auth/with_select_table_auth.groovy new file mode 100644 index 00000000000000..d84d0c6ed2d421 --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/with_auth/with_select_table_auth.groovy @@ -0,0 +1,184 @@ +package mv.with_auth +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("with_select_table_auth","p0,auth") { + + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "set runtime_filter_mode=OFF"; + sql "SET ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" + + String user_name = 'with_select_table_auth' + String pwd = 'test1' + try_sql("DROP USER ${user_name}") + sql """CREATE USER '${user_name}' IDENTIFIED BY '${pwd}'""" + + sql """ + drop table if exists orders + """ + sql """ + CREATE TABLE IF NOT EXISTS orders ( + o_orderkey INTEGER NOT NULL, + o_custkey INTEGER NOT NULL, + o_orderstatus CHAR(1) NOT NULL, + o_totalprice DECIMALV3(15,2) NOT NULL, + o_orderdate DATE NOT NULL, + o_orderpriority CHAR(15) NOT NULL, + o_clerk CHAR(15) NOT NULL, + o_shippriority INTEGER NOT NULL, + O_COMMENT VARCHAR(79) NOT NULL + ) + DUPLICATE KEY(o_orderkey, o_custkey) + PARTITION BY RANGE(o_orderdate) ( + PARTITION `day_2` VALUES LESS THAN ('2023-12-9'), + PARTITION `day_3` VALUES LESS THAN ("2023-12-11"), + PARTITION `day_4` VALUES LESS THAN ("2023-12-30") + ) + DISTRIBUTED BY HASH(o_orderkey) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + drop table if exists lineitem + """ + sql""" + CREATE TABLE IF NOT EXISTS lineitem ( + l_orderkey INTEGER NOT NULL, + l_partkey INTEGER NOT NULL, + l_suppkey INTEGER NOT NULL, + l_linenumber INTEGER NOT NULL, + l_quantity DECIMALV3(15,2) NOT NULL, + l_extendedprice DECIMALV3(15,2) NOT NULL, + l_discount DECIMALV3(15,2) NOT NULL, + l_tax DECIMALV3(15,2) NOT NULL, + l_returnflag CHAR(1) NOT NULL, + l_linestatus CHAR(1) NOT NULL, + l_shipdate DATE NOT NULL, + l_commitdate DATE NOT NULL, + l_receiptdate DATE NOT NULL, + l_shipinstruct CHAR(25) NOT NULL, + l_shipmode CHAR(10) NOT NULL, + l_comment VARCHAR(44) NOT NULL + ) + DUPLICATE KEY(l_orderkey, l_partkey, l_suppkey, l_linenumber) + PARTITION BY RANGE(l_shipdate) ( + PARTITION `day_1` VALUES LESS THAN ('2023-12-9'), + PARTITION `day_2` VALUES LESS THAN ("2023-12-11"), + PARTITION `day_3` VALUES LESS THAN ("2023-12-30")) + DISTRIBUTED BY HASH(l_orderkey) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + ) + """ + + sql """ insert into lineitem values + (1, 2, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-12-08', '2023-12-09', '2023-12-10', 'a', 'b', 'yyyyyyyyy'), + (2, 4, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-12-09', '2023-12-09', '2023-12-10', 'a', 'b', 'yyyyyyyyy'), + (3, 2, 4, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-12-10', '2023-12-09', '2023-12-10', 'a', 'b', 'yyyyyyyyy'), + (4, 3, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-12-11', '2023-12-09', '2023-12-10', 'a', 'b', 'yyyyyyyyy'), + (5, 2, 3, 6, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-12-12', '2023-12-12', '2023-12-13', 'c', 'd', 'xxxxxxxxx'); + """ + + sql """ + insert into orders values + (1, 1, 'o', 9.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (1, 1, 'o', 10.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (1, 1, 'o', 10.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (1, 1, 'o', 10.5, '2023-12-08', 'a', 'b', 1, 'yy'), + (2, 1, 'o', 11.5, '2023-12-09', 'a', 'b', 1, 'yy'), + (2, 1, 'o', 11.5, '2023-12-09', 'a', 'b', 1, 'yy'), + (2, 1, 'o', 11.5, '2023-12-09', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 12.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 12.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 12.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (3, 1, 'o', 33.5, '2023-12-10', 'a', 'b', 1, 'yy'), + (4, 2, 'o', 43.2, '2023-12-11', 'c','d',2, 'mm'), + (4, 2, 'o', 43.2, '2023-12-11', 'c','d',2, 'mm'), + (4, 2, 'o', 43.2, '2023-12-11', 'c','d',2, 'mm'), + (5, 2, 'o', 56.2, '2023-12-12', 
'c','d',2, 'mi'), + (5, 2, 'o', 56.2, '2023-12-12', 'c','d',2, 'mi'), + (5, 2, 'o', 56.2, '2023-12-12', 'c','d',2, 'mi'), + (5, 2, 'o', 1.2, '2023-12-12', 'c','d',2, 'mi'); + """ + + sql """analyze table lineitem with sync""" + sql """analyze table orders with sync""" + + sql """grant select_priv on ${db}.orders to ${user_name}""" + sql """grant select_priv on ${db}.lineitem to ${user_name}""" + sql """grant select_priv on regression_test to ${user_name}""" + + + sql """drop materialized view if exists mv1;""" + sql """ + CREATE MATERIALIZED VIEW ${db}.mv1 + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + DISTRIBUTED BY RANDOM BUCKETS 1 + PROPERTIES ('replication_num' = '1') + AS + select l_shipdate, o_orderdate, l_partkey, l_suppkey, + sum(o_totalprice) as sum_total, + max(o_totalprice) as max_total, + min(o_totalprice) as min_total, + count(*) as count_all, + bitmap_union(to_bitmap(case when o_shippriority > 1 and o_orderkey IN (1, 3) then o_custkey else null end)) as bitmap_union_basic + from lineitem + left join orders on lineitem.l_orderkey = orders.o_orderkey and l_shipdate = o_orderdate + group by + l_shipdate, + o_orderdate, + l_partkey, + l_suppkey; + """ + + sql """analyze table mv1 with sync""" + + connect(user=user_name, password="${pwd}", url=context.config.jdbcUrl) { + sql "use ${db}" + mv_rewrite_success( + """ + select t1.l_partkey, t1.l_suppkey, o_orderdate, + sum(o_totalprice), + max(o_totalprice), + min(o_totalprice), + count(*), + count(distinct case when o_shippriority > 1 and o_orderkey IN (1, 3) then o_custkey else null end) + from (select * from lineitem where l_shipdate = '2023-12-11') t1 + left join orders on t1.l_orderkey = orders.o_orderkey and t1.l_shipdate = o_orderdate + group by + o_orderdate, + l_partkey, + l_suppkey; + """, + "mv1" + ) + } + + connect(user=user_name, password="${pwd}", url=context.config.jdbcUrl) { + sql "use ${db}" + test { + sql """select * from mv1;""" + exception "denied" + } + } + + sql """drop MATERIALIZED VIEW IF EXISTS ${db}.mv1;""" +} + diff --git a/regression-test/suites/nereids_rules_p0/normalize_window/normalize_window_nullable_agg_test.groovy b/regression-test/suites/nereids_rules_p0/normalize_window/normalize_window_nullable_agg_test.groovy new file mode 100644 index 00000000000000..9617e8bb19cd95 --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/normalize_window/normalize_window_nullable_agg_test.groovy @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
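A plausible reading of the normalize_window_nullable_agg suite below: when an aggregate runs as a window function over a frame such as ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING, some rows see an empty frame, so the aggregate has to be planned as nullable even over NOT NULL input. A sketch of the effect, assuming a small table w(a int not null, c int):

    SELECT c,
           sum(a) OVER (ORDER BY c ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) AS s
    FROM w ORDER BY c;
    -- the first ordered row has an empty frame, so s is NULL there even though
    -- a is NOT NULL; later rows sum the one or two rows immediately before them.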
+suite("normalize_window_nullable_agg") { + sql "drop table if exists normalize_window_nullable_agg" + sql """create table normalize_window_nullable_agg (a int, b int,c int,d array) distributed by hash(a) + properties("replication_num"="1"); + """ + sql """insert into normalize_window_nullable_agg values(1,2,1,[1,2]),(1,3,2,[3,2]),(2,3,3,[1,5]),(2,2,4,[3,2]),(2,5,5,[5,2]) + ,(2,3,6,[1,2]),(2,5,7,[1,2]),(null,3,8,[1,23]),(null,6,9,[3,2]);""" + qt_max "select max(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_min "select min(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_sum "select sum(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_avg "select avg(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_topn "select topn(c,3) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_topn_array "select topn_array(c,3) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_topn_weighted "select topn_weighted(c,c,3) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_max_by "select max_by(b,c) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_min_by "select min_by(b,c) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_avg_weighted "select avg_weighted(b,a) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_variance "select variance(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_variance_samp "select variance_samp(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_percentile "select PERCENTILE(b,0.5) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_percentile_approx "select PERCENTILE_approx(b,0.99) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_stddev "select stddev(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_stddev_samp "select stddev_samp(b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_corr "select corr(a,b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_covar "select covar(a,b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_covar_samp "select covar_samp(a,b) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_group_concat "select group_concat(cast(a as varchar(10)),',') over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_retention "select retention(a=1,b>2) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_group_bit_and "select group_bit_and(a) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_group_bit_or "select group_bit_or(a) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_group_bit_xor "select group_bit_xor(a) over(order by c rows between 2 preceding 
and 1 preceding) from normalize_window_nullable_agg" + qt_group_bitmap_xor "select group_bitmap_xor(to_bitmap(a)) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + qt_sum_foreach "select sum_foreach(d) over(order by c rows between 2 preceding and 1 preceding) from normalize_window_nullable_agg" + + sql "drop table if exists windowfunnel_test_normalize_window" + sql """CREATE TABLE windowfunnel_test_normalize_window ( + `xwho` varchar(50) NULL COMMENT 'xwho', + `xwhen` datetime COMMENT 'xwhen', + `xwhat` int NULL COMMENT 'xwhat' + ) + DUPLICATE KEY(xwho) + DISTRIBUTED BY HASH(xwho) BUCKETS 3 + PROPERTIES ( + "replication_num" = "1" + );""" + + sql """INSERT into windowfunnel_test_normalize_window (xwho, xwhen, xwhat) values ('1', '2022-03-12 10:41:00', 1), + ('1', '2022-03-12 13:28:02', 2), + ('1', '2022-03-12 16:15:01', 3), + ('1', '2022-03-12 19:05:04', 4);""" + // this currently causes a core dump, so the query below is disabled for now +// qt_window_funnel """select window_funnel(3600 * 3, 'default', t.xwhen, t.xwhat = 1, t.xwhat = 2 ) over (order by xwhat rows +// between 2 preceding and 1 preceding) AS level from windowfunnel_test_normalize_window t;""" + qt_sequence_match "SELECT sequence_match('(?1)(?2)', xwhen, xwhat = 1, xwhat = 3) over (order by xwhat rows between 2 preceding and 1 preceding) FROM windowfunnel_test_normalize_window;" + test { + sql "select group_concat(xwho order by xwhat) over(partition by xwhen) from windowfunnel_test_normalize_window;" + exception "order by is not supported" + } +} \ No newline at end of file diff --git a/regression-test/suites/nereids_rules_p0/partition_prune/test_date_function_prune.groovy b/regression-test/suites/nereids_rules_p0/partition_prune/test_date_function_prune.groovy index c6f122e3c8735b..b0e13260aa2493 100644 --- a/regression-test/suites/nereids_rules_p0/partition_prune/test_date_function_prune.groovy +++ b/regression-test/suites/nereids_rules_p0/partition_prune/test_date_function_prune.groovy @@ -18,10 +18,8 @@ suite("test_date_function_prune") { String db = context.config.getDbNameByFile(context.file) sql "use ${db}" - sql "SET enable_nereids_planner=true" - sql "set runtime_filter_mode=OFF"; + sql "set runtime_filter_mode=OFF" sql "SET ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" - sql "SET enable_fallback_to_original_planner=false" sql "set partition_pruning_expand_threshold=10;" sql "drop table if exists dp" sql """ @@ -105,19 +103,11 @@ suite("test_date_function_prune") { DISTRIBUTED BY HASH(event_day) BUCKETS 4 PROPERTIES("replication_num" = "1"); """ - explain { - sql """ select /*+ SET_VAR(enable_nereids_planner=false) */ * from test_to_date_trunc where date_trunc(event_day, "day")= "2023-08-07 11:00:00" """ - contains("partitions=0/2") - } explain { sql """ select * from test_to_date_trunc where date_trunc(event_day, "day")= "2023-08-07 11:00:00" """ contains("VEMPTYSET") } sql """ insert into test_to_date_trunc values ("20230807000000"); """ - explain { - sql """ select /*+ SET_VAR(enable_nereids_planner=false) */ * from test_to_date_trunc where date_trunc(event_day, "day")= "2023-08-07 11:00:00" """ - contains("partitions=1/2 (p20230807)") - } explain { sql """ select * from test_to_date_trunc where date_trunc(event_day, "day")= "2023-08-07 11:00:00" """ contains("partitions=1/2 (p20230807)") diff --git a/regression-test/suites/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.groovy b/regression-test/suites/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.groovy index 
858f39e5e65cf2..2132028b7ba8f3 100644 --- a/regression-test/suites/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.groovy +++ b/regression-test/suites/nereids_rules_p0/push_down_filter/extract_from_disjunction_in_join.groovy @@ -20,7 +20,11 @@ suite("extract_from_disjunction_in_join") { sql "SET enable_fallback_to_original_planner=false" sql "set ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" sql "set disable_nereids_rules=PRUNE_EMPTY_PARTITION" - sql "set runtime_filter_mode=OFF" + sql """ + set runtime_filter_mode=OFF; + set disable_join_reorder=true; + set disable_join_reorder=true; + """ sql "drop table if exists extract_from_disjunction_in_join_t1" diff --git a/regression-test/suites/nereids_syntax_p0/mv/aggregate/agg_sync_mv.groovy b/regression-test/suites/nereids_syntax_p0/mv/aggregate/agg_sync_mv.groovy index b0f384c472c848..e63037ced798f7 100644 --- a/regression-test/suites/nereids_syntax_p0/mv/aggregate/agg_sync_mv.groovy +++ b/regression-test/suites/nereids_syntax_p0/mv/aggregate/agg_sync_mv.groovy @@ -453,7 +453,9 @@ suite("agg_sync_mv") { createMV("""create materialized view mv_sync48 as select id, var_pop(kint) from agg_mv_test group by id order by id;""") explain { sql("select id, var_pop(kint) from agg_mv_test group by id order by id;") - contains "(mv_sync47)" + check { result -> + result.contains("(mv_sync47)") || result.contains("(mv_sync48)") + } } qt_select_var_pop_mv """select id, var_pop(kint) from agg_mv_test group by id order by id;""" @@ -484,25 +486,23 @@ suite("agg_sync_mv") { } qt_select_window_funnel_mv """select id, window_funnel(3600 * 3, 'default', kdtm, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" - // map_agg is not supported yet - // qt_select_map_agg """select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""" - // sql """drop materialized view if exists mv_sync52 on agg_mv_test;""" - // createMV("""create materialized view mv_sync52 as select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""") - // explain { - // sql("select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;") - // contains "(mv_sync52)" - // } - // qt_select_map_agg_mv """select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""" + qt_select_map_agg """select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync52 on agg_mv_test;""" + createMV("""create materialized view mv_sync52 as select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""") + explain { + sql("select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;") + contains "(mv_sync52)" + } + qt_select_map_agg_mv """select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""" - // array_agg is not supported yet - // qt_select_array_agg """select id, array_agg(kstr) from agg_mv_test group by id order by id;""" - // sql """drop materialized view if exists mv_sync53 on agg_mv_test;""" - // createMV("""create materialized view mv_sync53 as select id, array_agg(kstr) from agg_mv_test group by id order by id;""") - // explain { - // sql("select id, array_agg(kstr) from agg_mv_test group by id order by id;") - // contains "(mv_sync53)" - // } - // qt_select_array_agg_mv """select id, array_agg(kstr) from agg_mv_test group by id order by id;""" + qt_select_array_agg """select id, array_agg(kstr) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync53 on agg_mv_test;""" 
+ createMV("""create materialized view mv_sync53 as select id, array_agg(kstr) from agg_mv_test group by id order by id;""") + explain { + sql("select id, array_agg(kstr) from agg_mv_test group by id order by id;") + contains "(mv_sync53)" + } + qt_select_array_agg_mv """select id, array_agg(kstr) from agg_mv_test group by id order by id;""" qt_select_retention """select id, retention(kdtm = '2012-03-11', kdtm = '2012-03-12') from agg_mv_test group by id order by id;""" sql """drop materialized view if exists mv_sync54 on agg_mv_test;""" @@ -534,19 +534,6 @@ suite("agg_sync_mv") { sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "insert into agg_mv_test select * from agg_mv_test;" - sql "set parallel_pipeline_task_num=1" qt_test "select kbint, map_agg(id, kstr) from agg_mv_test group by kbint order by kbint;" } diff --git a/regression-test/suites/nereids_syntax_p0/window_function.groovy b/regression-test/suites/nereids_syntax_p0/window_function.groovy index 209729b935894b..9a427d1019887d 100644 --- a/regression-test/suites/nereids_syntax_p0/window_function.groovy +++ b/regression-test/suites/nereids_syntax_p0/window_function.groovy @@ -201,4 +201,43 @@ suite("window_function") { from (select 1 as dt,2 as dt2)t group by dt,dt2""" + + sql """ DROP TABLE IF EXISTS table_200_undef_partitions2_keys3_properties4_distributed_by53 """ + sql """ + create table table_200_undef_partitions2_keys3_properties4_distributed_by53 ( + pk int, + col_boolean_undef_signed boolean null , + col_boolean_undef_signed_not_null boolean not null , + col_tinyint_undef_signed tinyint null , + col_tinyint_undef_signed_not_null tinyint not null , + col_smallint_undef_signed smallint null , + col_smallint_undef_signed_not_null smallint not null , + col_int_undef_signed int null , + col_int_undef_signed_not_null int not null , + col_bigint_undef_signed bigint null , + col_bigint_undef_signed_not_null bigint not null , + col_float_undef_signed float null , + col_float_undef_signed_not_null float not null , + col_double_undef_signed double null , + col_double_undef_signed_not_null double not null , + col_decimal_10_0__undef_signed decimal(10,0) null , + col_decimal_10_0__undef_signed_not_null decimal(10,0) not null , + col_decimal_16_10__undef_signed decimal(16,10) null , + col_decimal_16_10__undef_signed_not_null decimal(16,10) not null , + col_decimal_37__12__undef_signed decimal(37, 12) null , + col_decimal_37__12__undef_signed_not_null decimal(37, 12) not null , + col_decimal_17_0__undef_signed decimal(17,0) null , + col_decimal_17_0__undef_signed_not_null decimal(17,0) not null , + col_decimal_8_4__undef_signed decimal(8,4) null , + col_decimal_8_4__undef_signed_not_null decimal(8,4) not null , + col_decimal_9_0__undef_signed decimal(9,0) null , + col_decimal_9_0__undef_signed_not_null decimal(9,0) not null + ) engine=olap + UNIQUE KEY(pk) + distributed by hash(pk) 
buckets 10 + properties("replication_num" = "1"); + insert into table_200_undef_partitions2_keys3_properties4_distributed_by53(pk,col_boolean_undef_signed,col_boolean_undef_signed_not_null,col_tinyint_undef_signed,col_tinyint_undef_signed_not_null,col_smallint_undef_signed,col_smallint_undef_signed_not_null,col_int_undef_signed,col_int_undef_signed_not_null,col_bigint_undef_signed,col_bigint_undef_signed_not_null,col_float_undef_signed,col_float_undef_signed_not_null,col_double_undef_signed,col_double_undef_signed_not_null,col_decimal_10_0__undef_signed,col_decimal_10_0__undef_signed_not_null,col_decimal_16_10__undef_signed,col_decimal_16_10__undef_signed_not_null,col_decimal_37__12__undef_signed,col_decimal_37__12__undef_signed_not_null,col_decimal_17_0__undef_signed,col_decimal_17_0__undef_signed_not_null,col_decimal_8_4__undef_signed,col_decimal_8_4__undef_signed_not_null,col_decimal_9_0__undef_signed,col_decimal_9_0__undef_signed_not_null) values (0,null,false,-128,0,-5521,32767,870553334,1221220826,0,7213100385307283524,77.73622,40.123,43.329228412740655,20.489504075961925,300.343,61.0185,44.1855,40.123,96.0985,40.123,100.02,300.343,null,300.343,24.0980,86.0793),(1,false,true,127,-71,10613,-2378,-2147483648,-1735831341,4688356,1,40.123,65.62778,100.02,8.156179706539959,81.0185,300.343,300.343,100.02,null,300.343,300.343,300.343,40.123,100.02,39.0762,40.123),(2,null,true,2,127,245,4267,147483648,1783793654,-5281705981187589786,245,59.17273,40.123,11.811660493368755,19.455356365825995,40.123,100.02,100.02,40.123,26.1257,49.1309,5.0953,40.123,300.343,300.343,100.02,66.0149),(3,true,true,84,-128,8528,32767,-2056120424,147483648,null,20240803,50.7821,40.123,300.343,40.00619325266508,null,38.0387,100.02,30.0539,100.02,100.02,null,52.1752,87.1266,69.0995,null,56.0744),(4,true,false,127,63,-24034,0,32679,722930907,245,-10701,300.343,100.02,93.40872726548184,71.3886552262586,300.343,28.1994,45.0914,40.123,null,100.02,null,40.123,100.02,300.343,300.343,34.0277),(5,true,true,-128,-13,null,-10170,980938523,-2028389349,300.343,1502288110756366604,73.54129,40.123,null,84.84453556483243,6.1218,67.1502,36.0493,300.343,40.123,23.0992,49.0178,74.1497,6.0665,40.123,76.0229,92.0806),(6,true,false,null,73,245,-6365,365955659,-825168545,1,32679,20.548183,39.469467,36.399472882786114,63.61718689674476,300.343,3.1746,null,29.1827,null,37.0948,300.343,52.1278,null,100.02,43.1059,40.123),(7,false,true,4,1,245,30104,-966977318,147483648,-6472164452822149829,1,99.48591,40.123,12.81888725588849,40.123,300.343,40.123,300.343,300.343,40.123,300.343,300.343,75.0332,41.1584,300.343,94.0808,100.02),(8,true,false,-128,7,-9341,0,273264246,10153427,-593,32679,null,40.123,47.5996286613163,300.343,100.02,300.343,61.1669,40.123,40.123,83.0629,6.0006,300.343,100.02,100.02,40.123,100.02),(9,true,true,-73,-127,-18853,1007,-2147483648,484409610,7571241902674251552,147483648,88.81223,90.67181,46.09140924068056,45.91107758266007,14.0974,35.0919,60.0558,100.02,null,300.343,null,84.1840,100.02,18.0133,null,4.1655),(10,false,false,69,5,0,245,-1490527072,-1493840028,-3377192489409378222,-6324580157439333282,87.16578,81.2785,300.343,45.77483384423837,18.1387,100.0525,300.343,22.0678,300.343,40.123,40.123,300.343,null,300.343,300.343,21.0889),(11,null,true,127,1,8552,8908,619538385,-1922549998,1,32679,40.123,88.807396,33.08436686203323,73.58052266548373,300.343,100.02,null,300.343,300.343,20.0532,90.1112,40.123,16.1853,100.02,100.02,300.343),(12,false,false,-106,0,0,0,32679,-843142978,300.343,300.343,18.288996,40.123,40.123,16.216679762
946892,45.0787,100.02,95.1690,29.1696,100.02,300.343,45.1816,48.0997,null,97.0816,79.0883,83.1233),(13,null,true,1,68,0,-6288,26755852,2147483647,8178343,1,40.123,52.127373,40.123,40.606420235269525,2.1082,100.0243,40.123,26.1287,100.02,53.0670,40.123,100.02,null,300.343,81.1487,40.123),(14,true,false,-128,1,0,-7988,-1877117050,1882515617,147483648,-16312,87.14053,300.343,34.137315647747215,18.4177490944626,16.0717,40.123,100.02,92.1450,100.02,100.02,40.123,61.0075,53.1299,2.0102,87.0173,77.1927),(15,null,true,-128,1,20309,245,-249173211,-661830642,0,300.343,31.596346,17.814812,1.8288288462895608,100.02,40.123,100.02,40.123,40.123,100.02,300.343,40.123,300.343,40.123,73.0959,null,58.1723),(16,null,true,-28,2,32767,4998,147483648,32679,300.343,7,8.024016,19.004923,95.10220072797654,300.343,null,15.0435,12.1656,35.0773,19.1281,70.1733,82.1688,60.0645,69.1016,300.343,300.343,36.0207),(17,null,false,-128,-125,20665,-1816,null,-2147483648,300.343,20240803,100.02,15.448185,16.059618185812642,40.123,300.343,84.0546,300.343,40.123,300.343,100.02,300.343,24.1521,87.0186,100.02,100.02,86.0008),(18,false,false,127,0,-32768,245,709619843,-1187168694,1,245,26.72707,14.693371,51.950037154435954,100.02,61.0306,34.0408,26.1877,300.343,300.343,40.123,300.343,300.343,52.0132,100.02,68.0395,300.343),(19,null,true,127,7,-4902,-1992,2049362398,2147483647,20240803,1,40.123,92.96219,95.76594950639095,40.123,25.1403,85.1143,300.343,100.02,300.343,50.1683,null,67.1558,null,38.1055,100.02,40.123),(20,false,false,null,1,-5586,32767,-602231030,-468313281,31525,245,40.123,14.512555,62.640129358060165,23.940911286338412,300.343,71.1193,96.0935,95.0263,88.0094,100.02,73.1830,40.123,300.343,40.123,85.1319,100.02),(21,true,false,-1,8,868,21773,32679,-800848822,6,147483648,100.02,300.343,76.94700020940235,15.544687318864128,14.0265,300.343,61.0244,40.123,100.02,44.1985,19.0521,300.343,300.343,300.343,45.0813,62.0244),(22,false,false,127,-128,-32768,17278,1283172518,147483648,1,300.343,87.25913,24.94464,null,53.39684343837884,40.123,40.123,20.1610,300.343,38.1763,300.343,91.0316,24.1294,40.123,91.1445,300.343,30.0139),(23,null,false,-82,5,27925,-32768,147483648,398471216,-6320063,147483648,43.395927,16.977839,40.123,82.89956387452517,300.343,100.02,40.123,0.1926,98.1841,100.02,300.343,99.1989,86.0253,40.123,null,300.343),(24,true,false,83,127,30205,-32768,null,147483648,20240803,20240803,12.581072,100.02,100.02,100.02,97.1145,25.0743,34.0678,100.02,40.123,20.1024,96.1827,300.343,300.343,24.1978,300.343,4.0505),(25,false,false,113,-128,-4618,-21467,-119247765,174989684,702317944702217520,147483648,40.123,300.343,58.560202762643996,300.343,null,100.02,52.0910,100.02,3.0218,36.1052,50.1050,40.123,null,40.123,null,81.0179),(26,true,true,1,3,-20697,0,2147483647,620075510,32679,300.343,40.123,8.281405,40.123,97.91570642580545,11.1693,100.02,null,2.0452,300.343,25.1669,300.343,300.343,70.0238,43.1819,null,15.0432),(27,null,false,127,-44,null,245,null,0,147483648,5,100.02,300.343,89.8450409422548,57.325174157382484,null,63.1898,300.343,100.02,100.02,96.1695,17.1029,5.0219,40.123,300.343,83.0928,40.123),(28,false,true,113,1,0,-6709,-1425882860,-2087598432,3,245,100.02,100.02,40.123,60.0672399537993,null,40.123,null,41.0202,100.02,300.343,51.1719,22.0281,300.343,40.123,300.343,72.0791),(29,null,true,127,1,null,-22278,2147483647,1840700770,300.343,147483648,null,100.02,300.343,14.95002600079256,40.123,45.0593,97.0833,35.1485,62.1246,100.02,300.343,64.0744,76.1151,100.02,100.02,40.123),(30,false,true,33,-42,-32768,16362,32679,1006088622
,300.343,32679,59.80597,40.123,null,100.02,21.1295,60.0501,null,300.343,300.343,100.02,40.123,300.343,100.02,100.02,300.343,40.123),(31,null,true,1,1,245,245,-705023047,32679,8,32679,75.33006,55.79653,300.343,47.96342481868689,100.02,50.1482,100.02,40.123,19.1875,40.123,300.343,54.0214,55.1434,100.0140,100.02,20.1242),(32,true,true,-49,127,-28555,245,32679,-90829271,20240803,547072466793656360,300.343,300.343,24.174808167853868,89.8900684249949,99.0005,44.0427,63.1797,27.0327,41.1618,72.1580,100.02,69.0721,300.343,300.343,12.0697,300.343),(33,true,true,35,127,-21581,18054,-1002479747,-1728926153,300.343,32679,40.834206,300.343,40.123,88.77366326336902,73.0084,85.1595,100.02,300.343,49.1751,67.0464,87.0275,47.1497,19.0097,47.0830,60.1890,40.123),(34,true,true,1,0,2277,-3386,1594446117,147483648,20240803,5499960,71.92467,300.343,100.02,53.410688011061026,89.0782,3.1433,3.0984,100.02,55.1683,100.02,74.0873,40.123,300.343,100.02,78.0152,40.123),(35,null,false,null,-55,null,-10974,32679,493636356,1554461046393220425,-27645,40.123,100.02,100.02,100.02,300.343,69.1455,100.02,100.02,100.02,40.123,39.0987,66.1241,null,2.0170,300.343,100.02),(36,null,true,27,-20,32767,-248,2059692545,-2147483648,6601532,0,300.343,300.343,300.343,71.8771269520815,null,100.02,85.0821,55.0036,100.02,24.0011,12.1437,78.0906,19.1944,300.343,44.0017,300.343),(37,false,true,1,8,-27173,245,147483648,93506455,300.343,0,40.123,300.343,1.0040607970764792,100.02,40.123,300.343,97.0175,100.02,80.1873,87.0884,17.0957,42.1547,300.343,300.343,7.1573,93.1389),(38,false,true,28,-42,32767,-18984,950068895,-1750955934,1,2,59.211044,1.5997872,null,39.34886398502719,40.123,40.123,null,100.02,300.343,100.02,100.02,21.1855,80.0006,69.0869,80.0807,36.0612),(39,false,false,-128,1,28786,-14044,147483648,-2147483648,245,300.343,63.86071,300.343,46.28329660185213,96.36479559210424,91.0924,300.343,61.0974,40.123,98.1421,100.02,40.123,300.343,40.123,100.02,23.0575,100.0449),(40,false,true,4,-42,0,12636,0,799928425,18859,-2371,100.02,97.34173,29.35646567962841,19.668686120910028,100.02,40.123,300.343,100.02,40.123,100.02,100.02,74.0232,61.1506,300.343,300.343,82.1167),(41,true,false,31,2,-14397,-26164,2147483647,-2055922720,14752,20240803,31.928955,11.286691,77.33833415128589,300.343,100.02,100.02,100.02,56.0428,40.123,40.1015,null,300.343,40.123,18.1702,39.1213,100.02),(42,false,true,1,-128,-10836,31207,null,-401468030,32679,-4893513,100.02,17.293558,40.123,51.54822524103674,100.02,40.123,100.1889,90.0092,null,8.1551,91.0819,100.02,300.343,62.1251,12.1637,40.123),(43,false,true,-50,127,245,-9499,-593434204,-2147483648,-3197120,32679,40.123,99.54466,49.23737153185137,65.06185744929992,43.1605,47.1822,300.343,40.123,86.1185,100.02,88.1995,6.0865,40.123,40.123,78.0026,40.123),(44,true,false,121,5,245,0,-1807646722,-1169080773,4178459,29100,95.27716,70.122375,35.4276186393606,300.343,4.1364,57.0360,3.1043,100.02,300.343,52.1051,null,48.1964,40.123,17.1598,null,40.123),(45,null,true,0,-102,1562,-29208,-308404284,2147483647,1,300.343,300.343,100.02,84.19632755530832,57.7169107222678,40.123,40.123,100.02,300.343,65.0476,96.0983,300.343,40.123,52.1000,100.02,22.0141,40.123),(46,false,false,-128,1,32767,-32768,147483648,862317003,-6345173,7,73.13168,55.965034,100.17342735668034,81.0433792157518,8.0514,24.0488,7.1562,300.343,null,58.0156,100.02,49.1321,100.02,54.1186,69.1003,100.02),(47,true,false,3,127,-2428,18058,147483648,259435153,1,-7360468428682842680,40.123,100.02,300.343,40.123,null,40.123,100.02,40.123,32.1581,300.343,null,73.0453,2.1577,29.1375,30
.1488,40.123),(48,null,true,1,0,null,-18743,2147483647,-233529762,6,147483648,100.02,300.343,null,28.832947615497797,40.123,40.123,300.343,51.0908,65.0196,100.02,47.0712,100.02,14.1091,13.1306,null,100.02),(49,false,true,1,126,0,245,1404418041,-2147483648,-27417,245,40.123,300.343,14.803292159939625,37.776521406336386,40.123,43.0997,null,100.02,28.1714,39.1745,100.02,78.1121,85.1018,40.123,null,40.123),(50,true,true,-128,0,17506,-19233,928060045,-968094382,147483648,147483648,null,40.123,22.58245854456932,87.60945944280742,4.0550,7.1097,300.343,37.1549,300.343,300.343,74.1063,47.1902,100.02,300.343,94.1231,70.1391),(51,true,true,null,6,12355,20972,-420833573,1367773643,-3890253,8387476689501436345,100.02,300.343,87.05410559516945,100.02,5.1219,300.343,19.0517,300.343,3.1077,57.0513,null,100.02,83.0944,40.123,null,6.1304),(52,true,true,127,6,0,-22756,654719123,32679,null,-3080718,null,43.153564,null,63.062200847332235,64.1757,10.1427,29.0090,300.343,64.1496,40.123,43.1490,100.02,69.1899,80.1473,null,3.0733),(53,true,true,1,0,6830,-28441,-2147483648,963503490,147483648,3170207,83.754814,1.8227811,100.02,58.451635465970874,62.0259,88.0066,24.1511,300.343,null,40.123,300.343,70.1108,96.1089,76.1201,100.02,300.343),(54,false,true,20,-128,-32768,-29432,32679,2147483647,32679,20240803,76.52722,98.02416,63.21387051988944,40.123,100.02,300.343,12.1308,100.02,40.123,300.343,300.343,87.0184,100.02,300.343,44.1406,100.02),(55,false,true,null,117,30122,16666,-1550173665,-1494870150,null,1482819358272650863,300.343,76.16598,77.21551946423071,87.47986248815684,40.123,300.343,null,75.0855,49.1550,300.343,85.1219,40.0629,100.02,1.1426,300.343,98.1630),(56,null,true,127,0,18042,32767,0,2147483647,32679,20240803,100.02,26.286163,5.183864698359465,60.419389468392346,40.123,100.02,34.1515,72.1051,19.1828,300.343,300.343,40.123,100.02,40.123,300.343,100.02),(57,null,false,0,25,0,-32768,1668120939,-2147483648,245,300.343,8.957483,100.02,74.04929670728133,100.02,300.343,83.0333,40.123,91.1427,93.1833,40.123,40.123,59.0849,40.123,100.02,100.02,63.0824),(58,false,false,null,6,-12627,25341,32679,32679,245,-18848,14.538594,92.61009,28.04025495599968,300.343,null,40.123,22.0143,300.343,33.1907,40.123,100.02,300.343,39.0329,61.1527,5.1240,40.123),(59,null,true,9,-95,-32768,-24069,-1047251084,463155489,245,300.343,24.820452,40.123,43.74778099591234,55.539845373255744,70.0059,100.02,55.0268,100.02,40.123,40.123,300.343,40.123,100.02,40.123,100.02,100.02),(60,true,false,-128,-128,15805,24664,null,2147483647,300.343,7,49.240326,28.58755,78.19783953232441,26.200519632833316,100.02,96.1841,300.343,40.123,9.0768,100.02,81.0950,100.02,78.1179,100.02,null,40.123),(61,false,true,3,1,-13860,0,-2147483648,-1616672647,147483648,1,300.343,40.123,40.123,73.06602805164309,100.02,80.0096,null,70.0641,null,7.0982,83.0570,40.123,56.0610,83.0714,20.1656,100.02),(62,null,false,127,0,31992,-16832,1579711918,2147483647,300.343,6101263,64.585266,59.56007,83.69332124356997,100.02,null,40.123,4.1461,100.02,40.123,300.343,null,40.123,28.0577,300.343,300.343,100.02),(63,true,false,-128,0,-4159,245,147483648,41757331,1,20240803,100.02,100.02,65.58738442400104,89.32240697703746,31.1748,80.1877,40.123,300.343,91.0985,29.0750,86.0323,100.02,100.02,100.02,80.1498,40.123),(64,false,true,1,127,-16557,0,-1985104257,0,9,1,100.02,300.343,300.343,100.02,300.343,5.0042,null,40.1111,100.02,3.0885,null,86.0379,100.02,300.343,null,40.123),(65,false,false,0,3,32767,28823,null,32679,-21802,0,300.343,49.534836,59.37854469033925,300.343,null,83.1741,15.1613,40.123,8
4.0770,88.0951,19.0622,40.123,300.343,48.0820,100.02,300.343),(66,null,false,126,0,-32048,-27785,-2147483648,2147483647,147483648,245,81.342476,17.529161,80.2793141983029,100.82156374867995,88.1292,93.0266,40.123,300.343,29.1484,100.02,66.1397,300.343,300.343,100.02,null,100.02),(67,true,false,-128,0,245,22355,1606937656,0,5,245,40.123,85.46909,100.02,300.343,100.02,61.1880,74.0491,40.123,57.0988,100.02,40.123,300.343,40.123,26.0711,13.1249,80.1489),(68,null,false,3,1,-5723,32767,-2147483648,1998585079,245,147483648,0.8302286,100.02,100.02,29.91286703548176,62.1426,44.1871,95.1079,40.123,83.1947,100.02,null,300.343,40.123,100.02,40.123,89.0914),(69,true,false,null,-128,-22285,17637,2139352095,-1026889323,20240803,21038,40.123,40.123,71.2479317971375,100.02,300.343,3.1012,null,25.0630,300.343,8.0205,null,300.343,40.123,12.0072,40.123,48.1284),(70,null,false,-128,0,null,32767,866151793,0,300.343,1,100.02,22.772745,100.02,40.123,300.343,300.343,100.02,40.1362,85.1045,57.0423,93.1925,48.1466,40.123,42.1387,40.123,100.02),(71,true,false,1,0,245,-15199,0,1682957935,1,32679,47.658203,100.02,300.343,40.123,null,300.343,300.343,40.123,300.343,300.343,100.02,28.0038,42.1743,40.123,null,50.0117),(72,null,true,1,31,21669,-1197,0,-2147483648,32679,4528112,33.573883,64.29188,50.92734051547059,15.367888356606223,300.343,100.02,12.1497,57.1954,79.1693,77.1937,26.1772,100.02,300.343,300.343,8.1414,77.0290),(73,false,true,127,127,25706,25329,1023263009,32679,26200,20240803,97.54618,40.123,27.503011983005308,19.97810674796473,null,97.0935,null,100.02,null,100.02,94.1530,93.1843,100.02,40.123,40.123,100.02),(74,null,true,5,-106,-29269,-23856,0,0,-2170976,-32582,40.123,11.028486,2.5183566732160028,44.54309668957869,100.02,300.343,100.02,92.1376,96.0160,100.02,85.1356,4.1063,300.343,84.1108,40.123,100.02),(75,true,false,127,1,-22197,32767,1949766850,-2147483648,null,-5543639,21.571142,100.02,300.343,100.02,300.343,100.02,22.0722,100.02,27.0077,100.02,40.123,91.0051,300.343,300.343,67.0873,100.02),(76,true,false,127,127,245,0,32679,32679,-3846533548135585363,245,100.02,300.343,61.38543879914556,23.881139090062863,300.343,40.123,100.02,100.02,100.02,300.343,null,40.123,54.0558,100.02,55.1602,40.123),(77,false,true,0,0,-28913,0,32679,-2147483648,245,108954822763317083,97.504944,40.123,40.123,27.452781371491614,300.343,9.1958,12.0430,100.02,79.1577,46.1627,300.343,52.0817,40.123,65.1825,300.343,57.0987),(78,null,true,1,-128,19928,15233,0,1906126199,-5827520829230900217,3,100.02,55.513626,40.123,86.45729040432911,null,68.1445,53.0981,40.123,40.123,100.02,52.0057,1.1593,43.1824,75.1936,21.1504,40.123),(79,null,false,62,127,-1375,7079,-1810078488,0,1,-842283644995150167,18.873375,19.812044,0.09239759665896496,50.1599075730793,100.02,100.02,2.0606,100.02,40.123,300.343,12.0767,44.1073,300.343,84.0131,14.1248,40.123),(80,false,false,127,-74,32767,32767,406348391,-1524789913,245,3963615275036134392,40.123,100.02,75.90399227145431,100.02,null,73.0645,40.123,31.0848,300.343,14.1945,11.0300,93.0716,null,40.123,100.02,67.1887),(81,false,false,-128,-115,null,-8824,null,-1274877181,4238095985939191348,147483648,null,50.115902,5.116587898562866,90.55427479754299,null,300.343,null,40.123,100.02,40.123,40.123,100.02,74.1901,21.0636,58.1929,38.1830),(82,false,false,0,127,-10542,1115,703525872,-156071729,245,32679,null,88.95235,40.123,300.343,40.123,90.1234,null,35.0061,300.343,300.343,300.343,300.343,22.0821,60.0915,null,25.1671),(83,true,false,null,-128,-23366,17904,-1845879247,0,147483648,245,56.185734,98.85193,28.791363129340947
,69.53273262180916,40.123,40.123,100.02,44.1043,100.02,300.343,100.02,5.0287,81.0042,40.123,300.343,40.123),(84,false,false,-128,127,null,-903,2147483647,-298576443,4326007499518926142,1,300.343,40.123,8.488206885521171,90.22086568911118,79.1075,84.1203,36.0691,100.0122,40.123,7.0557,91.1252,300.343,null,100.02,40.123,300.343),(85,true,true,59,20,-32768,-22364,147483648,1134056516,147483648,1,null,40.123,100.02,50.95124572797081,34.0075,48.1173,5.0180,9.1309,null,11.1694,44.1582,100.02,40.123,47.0938,26.1558,16.0771),(86,true,false,62,100,-13209,0,null,2147483647,-6882957,147483648,null,100.02,34.02659572164404,100.02,34.0098,40.123,50.1584,55.0513,null,26.0938,100.02,40.123,41.0845,82.1065,100.02,82.1410),(87,true,true,1,110,0,7964,0,-1612707728,-15102,147483648,27.3543,56.960598,40.123,300.343,300.343,300.343,51.0405,58.1095,null,300.343,300.343,40.123,40.123,50.1398,0.1660,100.02),(88,null,true,2,2,null,245,-1994755213,-2147483648,20240803,-2055497,33.082787,36.155426,100.02,75.9445455969295,null,300.343,89.0874,40.123,27.0510,85.1684,67.0026,72.1427,18.1930,31.1147,10.0659,11.1875),(89,null,true,18,72,31667,-18537,50385711,-2019108535,-26789,300.343,53.41752,64.563805,15.977002340673545,36.11005242038549,40.123,81.0956,35.0066,14.1764,null,23.0120,null,14.1528,300.343,100.02,84.1084,65.1933),(90,true,true,-128,-106,245,-3202,576364392,1696762574,1801544748428540677,1,null,86.865364,83.52225659799662,300.343,40.123,100.02,1.1245,65.1628,40.123,25.0485,null,100.02,40.123,25.0430,78.0278,1.1572),(91,false,true,96,7,32767,0,-2147483648,32679,8,20240803,31.439945,100.02,40.123,300.343,100.02,300.343,100.02,59.1978,5.0234,27.1617,40.123,38.0460,46.1356,300.343,100.02,18.0073),(92,null,true,-60,-32,-32768,-32768,2147483647,-1707297306,null,9,53.642704,300.343,92.21430934975321,89.51856007304167,100.02,40.1787,100.02,13.1243,300.343,70.0531,72.1695,68.1924,null,33.1963,100.02,10.1940),(93,null,false,null,1,245,-23966,-229883397,892520300,245,20240803,100.02,40.123,100.02,68.07177164188646,100.02,300.343,40.123,40.0709,87.1993,48.1822,84.0300,81.0583,68.1210,70.0513,24.0484,55.0348),(94,false,false,0,126,-5565,16334,-1500369240,2147483647,20240803,1,40.123,40.123,22.409783125381818,53.538538746410985,100.02,92.0354,100.02,300.343,300.343,88.0204,100.02,40.123,null,100.02,100.02,99.0716),(95,true,true,13,-103,-18191,-32768,null,0,300.343,-27590,100.02,46.513206,20.34480996068452,16.2012744082584,null,40.123,47.0734,27.1125,100.02,40.0675,null,40.123,53.1515,40.123,100.02,17.0719),(96,true,false,-25,0,-15441,245,-2147483648,-2094951540,4128659932999713281,-3014477361704267309,1.0252304,300.343,300.343,100.02,300.343,4.0299,null,40.123,null,95.1601,46.0039,31.0319,33.1251,49.1539,null,90.1318),(97,true,true,7,66,0,8597,-2147483648,2082358272,2539,245,40.123,9.645237,null,91.98940397815429,68.0136,76.0214,null,14.0094,300.343,73.1017,40.123,40.123,300.343,100.02,40.123,40.123),(98,false,false,null,-128,2658,0,2147483647,0,null,300.343,8.48215,300.343,300.343,70.31424485294733,100.02,300.343,300.343,40.123,100.02,40.123,100.02,36.0302,100.02,40.123,100.02,300.343),(99,true,false,1,1,null,-32768,-1227233525,0,3,3,45.120365,300.343,40.123,32.08155756681695,null,40.123,100.02,100.02,40.123,300.343,32.1876,95.1981,19.1851,68.1442,77.1439,300.343),(100,true,true,0,127,0,-32768,43026623,-1693013137,5018060789260282465,20240803,39.016026,51.351124,7.0261623993586495,300.343,300.343,100.02,11.0957,100.02,38.0553,40.123,22.1984,40.123,300.343,100.02,88.0393,81.1827),(101,false,true,-128,1,24323,26647,-1219608
286,-2147483648,1,20240803,65.799225,28.868416,10.181899475050512,40.123,null,40.123,75.0513,300.343,null,46.1203,40.123,83.1503,39.1853,37.0077,null,300.343),(102,null,true,3,7,245,5533,124152548,193915138,0,8,40.123,100.02,40.123,15.435045819369767,81.0346,40.123,300.343,100.02,null,20.0112,300.343,76.1545,300.343,300.343,40.123,100.02),(103,false,true,0,-100,31066,23479,422438606,-149496060,147483648,5,40.123,79.40801,300.343,39.435633537608105,100.02,300.343,null,92.0144,0.0410,100.02,300.343,76.1794,100.02,36.1003,85.1865,100.02),(104,null,false,-128,-128,8616,32767,147483648,1718075742,null,24977,100.02,79.642685,100.02,26.711461273327185,40.123,81.0067,40.123,18.1762,33.0396,20.1293,42.0193,300.343,300.343,300.343,300.343,98.0503),(105,true,true,127,0,245,9380,147483648,147483648,null,300.343,null,300.343,100.02,100.02,18.1115,100.02,83.0080,82.1976,300.343,100.02,null,100.02,null,75.0449,78.1436,89.1410),(106,true,true,-39,-128,-16575,-14929,-1972652462,147483648,300.343,-2901473,28.91638,100.02,97.73573465631392,76.35445572091163,100.02,40.123,100.02,53.0278,null,62.1835,27.1195,30.1130,null,82.1932,25.1830,40.123),(107,false,true,null,-115,245,3557,1547797325,32679,7451546,1,100.02,9.934288,300.343,40.123,null,62.1662,100.02,40.123,null,64.0502,null,70.1488,100.02,40.123,100.02,46.1974),(108,null,false,81,25,31615,32767,-1569899733,0,-3286441,20240803,40.123,100.02,39.94254836124736,100.02,300.343,60.1307,300.343,97.1733,17.0098,18.1173,100.02,100.02,76.1069,300.343,100.02,40.123),(109,null,true,null,97,null,20625,-1312026698,-2147483648,1,245,100.02,300.343,300.343,2.087018889870059,39.0091,40.123,67.0813,1.1311,100.02,81.0368,52.1543,35.1617,null,300.343,300.343,100.02),(110,null,false,1,-110,-9605,-32768,2147483647,32679,2,32679,100.02,37.999928,47.15762507873273,40.123,69.0217,14.1567,100.02,27.0992,74.0342,100.02,6.0813,52.0089,67.0109,97.0502,40.123,100.02),(111,null,false,53,127,-12609,4725,104458880,-758525622,3013348,-8051978,300.343,85.78309,null,17.86424850123154,100.02,85.0096,300.343,17.0553,100.02,89.0451,68.1888,40.123,2.0136,14.1320,20.0970,95.1267),(112,null,true,1,127,32767,0,0,-181204112,-7769,300.343,300.343,24.86158,null,100.02,12.0528,5.0491,70.0618,300.343,95.1190,50.0241,null,40.123,100.02,40.123,13.0238,300.343),(113,null,false,-103,-54,null,-23247,-2147483648,1565107572,null,147483648,40.123,300.343,69.52143443753269,40.123,40.123,40.123,97.0816,79.1983,null,36.1032,null,300.343,300.343,54.1574,81.0281,12.1005),(114,null,true,127,-128,30108,-10269,1047943244,147483648,32679,1278162926834511692,null,45.2988,null,48.34738019585097,null,40.123,300.343,100.02,100.02,26.1202,79.0876,61.1514,40.123,95.0140,11.0472,40.123),(115,null,true,122,0,26174,245,147483648,-289447525,null,300.343,40.123,300.343,40.123,50.10516812711762,91.1473,40.123,95.1676,40.123,40.123,32.0531,100.02,300.343,90.0932,50.0650,40.123,48.0383),(116,true,true,1,-128,32767,245,-743484773,1133528555,null,-4462,28.942665,78.210526,100.02,100.02,null,40.123,74.0860,300.343,null,7.0398,40.123,22.0092,70.1124,99.0855,100.02,300.343),(117,null,false,127,-76,-6242,9953,-1388529113,1117902118,245,147483648,65.12925,94.87573,300.343,84.08118530362427,300.343,9.0203,50.0958,100.02,100.02,100.02,40.123,100.02,0.0641,300.343,null,3.0179),(118,null,false,-128,-128,-30764,-32768,0,32679,4192011898564589227,1,300.343,48.720806,300.343,300.343,36.1343,40.123,100.02,100.02,40.123,40.123,300.343,300.343,23.0690,99.1259,71.1940,100.02),(119,false,false,null,2,0,-9964,-1638010852,-2147483648,300.343,147483648,n
ull,88.76886,300.343,51.972153632907514,null,65.1584,null,300.343,40.123,6.0150,3.0613,91.1639,65.0057,36.1160,82.0661,100.02),(120,null,false,127,60,-17072,-24305,-695080826,0,245,245,100.02,100.02,40.123,40.123,40.123,60.0610,85.0021,8.0322,100.02,100.02,40.123,45.1733,99.1817,84.0376,55.0536,300.343),(121,null,false,1,0,-2767,-9273,1478135495,32679,2881140859769652250,-15040,77.91241,100.02,89.28503501771756,82.31653292545549,100.02,27.1697,null,300.343,100.02,79.1965,null,100.02,100.02,26.1863,100.02,86.0545),(122,null,false,0,7,-1298,6968,32679,-553231962,300.343,20240803,75.41454,19.38961,100.02,22.87081519143626,null,100.02,300.343,40.123,60.0005,300.343,6.0033,19.0163,300.343,300.343,300.343,40.123),(123,null,false,1,-128,-32459,30567,-2147483648,147483648,300.343,32679,85.95871,300.343,300.343,88.23349841859975,83.1243,40.123,36.0062,28.0244,78.1511,300.343,null,40.123,null,40.123,12.0330,96.0282),(124,false,false,127,-56,16609,29019,1919988745,-2147483648,null,20240803,9.960259,40.123,64.4276470734917,67.69034261730734,100.02,300.343,30.0195,100.02,300.343,300.343,100.02,69.0774,100.02,100.02,52.0927,40.123),(125,false,false,38,1,-11224,-18790,2147483647,2147483647,147483648,245,50.67896,100.02,300.343,40.123,71.1867,17.0300,100.02,95.0291,300.343,33.0339,100.02,50.1826,22.1188,15.0461,300.343,81.0832),(126,false,false,0,-110,32767,-14386,147483648,32679,null,300.343,78.18328,40.123,300.343,100.02,20.0887,40.123,100.02,300.343,40.123,40.123,40.123,64.0699,null,91.0765,300.343,38.1851),(127,true,false,72,-128,-336,30617,1259754652,2108044612,-28047,9,68.56738,300.343,60.94303006453322,100.02,72.0365,300.343,53.0241,100.02,null,80.1020,45.0265,300.343,40.123,40.123,null,18.1104),(128,false,true,-128,0,25675,27385,-112276230,32679,-10413,20240803,88.74651,99.25312,100.02,100.65424694588411,100.02,46.0395,null,100.02,null,95.0482,100.02,300.343,null,61.1176,100.02,300.343),(129,true,true,-3,1,245,11887,null,2147483647,-7400584094873709947,1,14.39657,100.02,300.343,40.3636405460102,null,31.0690,100.02,300.343,null,96.1680,100.02,4.0498,300.343,300.343,300.343,300.343),(130,null,false,null,5,0,575,2147483647,0,-15301,1209491,300.343,91.30643,null,40.123,100.02,300.343,300.343,40.123,100.02,23.1428,100.02,20.1225,100.02,300.343,40.123,40.123),(131,false,true,1,1,20429,29076,-1762767490,591743971,1,32679,40.123,300.343,64.76554944613694,5.929978974610336,88.1899,66.0615,7.0244,300.343,8.1396,100.02,79.1989,55.1193,null,40.123,97.1875,100.02),(132,null,false,1,1,-2841,245,1794737794,249688774,32679,0,40.123,100.02,null,40.123,null,300.343,300.343,300.343,null,26.0946,300.343,82.0601,null,28.0584,300.343,100.02),(133,true,false,-128,2,-15316,3718,32679,-690297108,null,147483648,null,11.527625,null,36.182018543784004,100.1448,300.343,84.0691,94.0152,20.1872,68.1781,20.1761,100.02,4.1596,40.123,38.0850,26.0728),(134,true,true,71,-77,1243,-29476,32679,2147483647,300.343,9,40.123,2.1161988,44.721238188178454,100.02,40.123,75.1330,40.123,4.1789,100.02,47.1459,100.02,52.1343,300.343,12.1044,300.343,100.02),(135,false,false,1,5,-32768,245,832514291,-1854940774,32679,300.343,40.123,59.55407,89.35441854005492,24.042795943672747,300.343,7.1391,40.123,40.123,24.1904,99.1677,14.0423,100.02,100.02,100.02,40.123,300.343),(136,null,true,1,-75,31689,-18322,-285447156,147483648,null,20240803,40.123,40.123,84.0139915724961,64.01879641982227,100.02,40.123,40.123,11.1435,40.123,62.1781,74.0872,21.1737,20.0073,300.343,300.343,100.02),(137,true,true,-128,-112,-4927,-8096,147483648,32679,32679,32679,40.123,58.531
23,21.48699460605266,80.99727243439341,85.0584,78.0000,76.1551,300.343,100.02,100.02,94.0008,40.123,300.343,100.02,300.343,28.0385),(138,true,false,-128,75,null,23645,-2147483648,2147483647,245,7428,94.312706,100.02,27.34384759452868,40.123,19.0207,40.123,21.0118,90.1011,40.123,41.1255,300.343,6.0253,300.343,40.123,12.0525,40.1869),(139,false,true,1,8,-24812,31452,-2147483648,526911742,32679,300.343,55.73405,300.343,17.800197355863794,100.02,40.123,100.02,5.1893,100.02,300.343,12.0620,8.0921,300.343,null,300.343,300.343,40.123),(140,false,false,-42,101,-30538,2630,-186234472,-2147483648,245,1,83.33636,49.598263,88.94428964612923,300.343,300.343,100.02,40.123,22.0527,40.123,40.123,68.0934,300.343,65.0857,87.0248,null,40.123),(141,null,true,127,-128,-27595,-12956,2147483647,-1902241305,1533006,11537,0.27623007,300.343,70.0590946918003,100.02,300.343,50.0995,40.123,88.1809,300.343,50.0149,null,79.0088,56.0563,100.02,3.1822,40.123),(142,false,false,0,-128,23715,1796,2147483647,-2073273751,2,32679,40.123,20.084553,38.363932232357705,40.89554612866681,68.0723,100.02,300.343,40.123,null,26.1041,300.343,100.02,25.0477,73.1305,300.343,100.02),(143,null,false,-127,54,4866,-14672,2069236806,164100083,6536064,-27229,38.150673,28.145409,40.123,43.271153634465826,6.0183,87.1456,90.0555,100.02,56.0633,65.1510,300.343,64.0985,96.0445,51.1169,300.343,100.02),(144,null,true,8,1,0,245,32679,0,245,245,300.343,49.1595,null,96.7699852244762,12.0629,47.0668,100.02,79.0583,40.123,76.0360,6.1156,300.343,87.0120,37.0374,20.0107,40.123),(145,false,true,127,127,231,245,0,2147483647,-4155201,147483648,55.094093,67.009796,8.80227098959567,96.97187513932896,87.0709,68.0667,91.1983,63.1604,100.02,100.02,65.0108,90.1654,41.1876,5.0705,26.0387,300.343),(146,false,true,127,9,32767,32767,658418135,-437486193,7,245,null,40.123,100.02,300.343,79.1581,19.1866,40.123,30.1889,300.343,300.343,80.1265,300.343,null,100.02,300.343,300.343),(147,null,true,-11,6,-10644,32767,32679,-1284237248,1,32679,10.248305,50.55562,300.343,34.47185435551129,45.1925,80.1317,100.02,300.343,100.02,300.343,26.1244,300.343,100.02,300.343,null,40.123),(148,true,true,127,-128,-4878,32767,32679,-339538000,482554,4821,71.634605,36.63903,2.365454209953471,22.663615985934936,null,100.02,34.0122,100.02,40.123,300.343,97.0987,28.0643,64.0605,300.343,100.02,5.0883),(149,false,true,0,5,245,60,1824478307,147483648,2,-4476632980072869204,93.32383,42.04267,100.02,51.37715926909345,40.123,60.1327,100.02,100.02,300.343,99.1742,null,40.123,36.1038,8.0276,41.1160,100.02),(150,null,false,null,53,null,8023,1383458937,-401715533,20240803,-27180,null,300.343,63.48460867022761,8.985522002082849,8.0315,59.1740,100.02,300.343,100.02,67.0608,6.0489,51.1445,100.02,40.123,null,40.123),(151,false,true,1,-128,245,245,706650098,-1977203866,null,7,62.289223,300.343,82.39375057254898,1.0657909024939936,300.343,7.1881,82.1928,300.343,100.02,40.123,300.343,100.02,null,73.0651,300.343,300.343),(152,false,true,126,119,-26868,-15616,1587788697,-2147483648,-4924529,300.343,40.123,30.292782,null,300.343,28.1303,300.343,48.0032,300.343,40.0588,40.123,15.0326,300.343,76.0864,40.123,41.0419,55.0078),(153,true,true,6,-128,-32768,32767,2147483647,-107187645,14230,8313294,69.55337,50.10263,70.61695352071436,100.02,300.343,100.02,40.123,300.343,null,77.0521,100.02,300.343,40.123,15.1432,null,4.1525),(154,true,false,null,-128,-32768,-7269,0,2147483647,null,-24117,10.93608,96.51051,13.662109207650328,46.39266611215844,69.1678,40.123,75.1448,20.0209,300.343,100.02,null,300.343,76.1590,86.1793,89.1705,1
00.02),(155,true,false,1,3,-20965,1397,32679,147483648,-15514,24658,4.333004,36.363426,68.32207447960896,100.02,15.1599,1.0341,40.123,100.02,300.343,300.343,46.1646,100.02,300.343,22.0230,9.0330,100.02),(156,null,true,2,0,245,-8890,-2147483648,-1869054459,null,-5594434,100.02,300.343,null,40.123,40.123,58.1789,300.343,33.0526,55.1202,300.343,null,6.1861,69.1035,40.123,100.02,40.123),(157,null,false,4,0,null,32767,0,32679,300.343,245,74.3095,40.123,100.02,0.9558084828441511,40.123,42.0003,null,97.0759,52.1753,88.0094,100.02,300.343,1.1874,100.02,40.123,62.0600),(158,null,true,-128,-105,-32768,-30205,-2147483648,668511903,-5724613,8,100.02,51.563293,39.63710534303279,100.02,40.123,17.1668,68.1547,91.0505,300.343,55.1339,100.02,40.123,100.02,57.1705,10.1103,100.02),(159,true,true,106,-15,8915,29227,-2147483648,-20489220,245,20240803,97.018875,100.02,300.343,50.08526545127984,null,40.123,300.343,66.1420,40.123,300.343,40.123,40.123,100.02,5.1234,40.123,40.123),(160,null,true,-122,-128,null,-27283,860707403,-1846775647,32679,20240803,54.639565,57.168278,43.51439085008457,9.930887455136805,300.343,29.0291,100.02,100.02,null,100.02,24.1941,300.343,66.1524,13.1061,300.343,76.1610),(161,false,false,55,1,-24856,0,null,273330785,6228192824530271488,-23231,4.166127,100.02,73.47696626472431,15.341997224491761,100.02,27.0868,4.1831,16.1381,100.02,79.0886,66.1674,300.343,20.1287,58.1149,53.0946,90.0471),(162,false,false,0,1,23677,0,2147483647,147483648,32679,300.343,null,82.11983,100.02,68.1183108905539,51.0759,300.343,2.0146,79.0170,1.0469,100.02,33.0449,300.343,2.0509,100.02,89.0122,53.0934),(163,null,true,-20,-80,21710,17802,-2147483648,-44908386,147483648,-30892,69.70228,50.63278,300.343,300.343,8.1179,40.123,49.1359,40.123,100.02,33.1236,0.0994,93.1043,48.1059,40.123,300.343,69.0951),(164,true,false,5,69,245,-32768,null,1122572837,32679,1983361535998042679,40.123,29.683115,45.13713825982192,100.02,null,300.343,300.343,300.343,null,51.0455,100.02,32.0948,null,100.02,3.0196,40.123),(165,true,false,-3,39,-32768,-14223,-407001947,-88610448,300.343,300.343,62.86208,300.343,22.02080052202581,18.865467615423626,300.343,300.343,87.0985,300.343,40.123,100.02,74.0906,40.123,300.343,81.0434,79.0259,22.1417),(166,false,true,96,-17,1109,-24909,1781166156,-1454435420,-8311153686603853007,147483648,53.31313,40.123,36.08041243968696,93.81041771670793,100.02,100.1722,null,40.123,null,1.0363,59.0047,67.1107,42.0603,100.02,300.343,300.343),(167,true,true,127,-70,-21317,-19368,274090080,1552234968,32679,300.343,39.997547,51.8978,58.70529247129719,15.019196105972863,60.0376,48.1921,300.343,76.1680,null,100.02,100.1999,300.343,100.02,40.123,300.343,52.0149),(168,true,false,45,0,-26860,-10177,-1216625010,2032115995,20240803,300.343,69.06438,100.02,32.430365161482186,28.54698088552074,300.343,25.0491,80.1805,14.0454,100.02,40.123,40.123,300.343,64.0859,300.343,69.1063,40.123),(169,false,true,-74,127,32767,-32768,null,1989919147,1,1,64.49969,23.08643,300.343,100.02,86.0643,21.1390,null,300.343,100.02,40.123,100.02,100.02,51.1161,25.1942,97.0069,62.1089),(170,null,false,42,26,-29673,-6767,-2147483648,-1385425906,32679,20240803,100.02,300.343,40.123,40.123,27.0763,40.123,300.343,300.343,22.0888,100.02,null,26.0066,300.343,17.1600,33.0503,67.1291),(171,null,true,6,23,-4581,32767,-2147483648,32679,1639,-810333,32.13477,59.64614,100.02,87.93132379078565,88.1012,60.1940,35.1905,100.02,100.02,100.02,40.123,300.343,300.343,46.1793,300.343,56.0742),(172,true,false,49,-18,10647,32767,0,0,-4705967,995543,39.60285,57.59544,19.8761052468
4921,17.500848987419083,null,300.343,null,62.1315,300.343,93.1966,7.0544,2.0692,40.123,11.1242,25.1992,40.123),(173,false,false,1,-128,-4029,32767,-1468225329,-1331144674,147483648,300.343,37.145172,82.59502,100.171958220766,73.91801775541255,29.1411,66.0227,null,300.343,300.343,44.1453,300.343,100.0322,36.0108,92.0456,null,100.02),(174,false,false,1,85,24704,-32768,-1700886455,942495967,245,300.343,40.123,100.02,83.07083469187177,83.361740773243,44.1926,100.02,31.1582,40.123,300.343,95.1907,100.02,95.0525,87.1093,70.1932,300.343,44.1647),(175,true,true,0,-2,26480,7005,-354093593,-575936753,147483648,8261132,300.343,300.343,78.90413425966202,79.50226474352503,95.1729,5.1937,34.0443,100.02,100.02,100.02,15.1668,92.0063,100.02,34.0381,32.1136,100.02),(176,null,true,-17,127,null,-13578,-784045807,2007709101,300.343,300.343,null,92.845665,40.123,92.05963372931738,100.0679,100.02,9.0409,28.1107,94.1039,300.343,100.02,100.02,100.02,85.1842,null,75.1707),(177,true,true,1,1,2540,18797,0,-1536075223,147483648,20240803,300.343,52.06142,null,5.738342067117419,300.343,300.343,40.123,69.1197,63.0400,95.0953,10.1670,40.123,null,27.0352,7.0592,100.02),(178,null,true,127,-128,10516,-17295,2147483647,2147483647,147483648,-11385,35.60083,88.3799,95.66123829640617,16.0590735353562,28.1373,300.343,100.02,100.02,2.1455,100.02,100.02,44.1825,6.0106,51.0764,37.1739,100.02),(179,true,true,null,1,-119,32767,147483648,0,null,300.343,54.11735,2.9736354,5.5212872678061204,70.14211472476848,100.02,28.1085,40.123,300.343,100.02,300.343,57.0599,38.0298,97.1411,27.1211,78.1772,19.1982),(180,true,true,1,1,null,245,147483648,1454328724,-15403,300.343,33.222767,40.123,72.79185042565685,59.164033988993005,40.123,24.0907,70.0489,40.123,56.1604,64.0264,300.343,32.1469,300.343,40.123,100.02,68.1032),(181,false,true,6,0,-13357,-23853,-1783523447,206380972,-14933,20240803,null,73.067,43.00425396223879,100.02,57.1678,40.123,300.343,300.343,45.1238,40.123,100.02,40.123,300.343,16.0510,40.123,40.123),(182,true,true,-79,1,13944,245,0,333012856,245,300.343,66.92514,17.140984,15.116051354884547,300.343,7.1818,0.0419,300.343,40.123,41.0495,300.343,40.123,16.1287,0.0291,13.1787,100.02,40.123),(183,false,false,101,-128,245,26140,2111655080,147483648,1,300.343,72.77176,99.59849,89.76221878263951,40.123,65.1212,77.1425,null,100.02,16.0480,100.02,40.123,58.0354,300.343,10.0163,300.343,53.1106),(184,null,false,93,20,-12468,32767,2147483647,-1919350601,32679,245,300.343,300.343,89.97542003460438,83.07797527515272,null,40.123,54.0045,90.1450,40.123,53.0251,40.123,9.0271,100.02,40.123,100.02,300.343),(185,false,false,127,4,-30605,8961,-1603737214,32679,20240803,32679,100.02,73.1095,null,0.2554223846999049,40.123,23.1967,40.123,40.123,300.343,28.1916,100.02,40.123,62.1409,60.1379,300.343,42.1297),(186,null,false,1,-128,-11525,31446,2147483647,0,245,9,84.81564,78.88689,100.02,88.33230973847827,null,30.0019,98.0200,13.0767,30.0451,57.1554,57.0247,43.0648,40.123,300.343,300.343,40.123),(187,false,true,null,127,245,0,2005320865,32679,32679,245,6.6226397,40.123,300.343,22.045765620672245,100.02,40.123,98.0871,16.0154,100.02,40.123,19.1988,1.1923,48.0583,74.0135,null,100.02),(188,true,true,-25,-54,245,-8351,-2054730153,-1400175999,9,-15883,56.278236,49.124416,84.50003047597744,40.123,79.0664,100.02,21.0786,76.0055,40.123,45.1528,59.0995,40.0736,72.0219,36.1395,58.1532,40.123),(189,false,false,127,16,7527,0,2147483647,147483648,32679,20240803,40.123,77.93991,100.02,40.123,51.0606,65.1838,40.123,36.0241,71.0078,300.343,300.343,8.0658,100.02,40.123,20.1972
,84.0992),(190,true,true,-98,0,245,0,null,2147483647,3,32679,300.343,56.213825,null,300.343,null,22.1118,22.0907,40.123,98.0671,100.02,100.1521,58.0234,40.123,100.02,40.123,88.0260),(191,true,true,127,-78,245,-32768,983178148,-1796395082,32679,147483648,40.123,40.123,null,40.123,null,100.02,300.343,100.02,null,100.02,300.343,20.0124,89.1252,300.343,40.123,100.02),(192,true,true,68,-128,-3705,13968,1020106271,-484224210,245,7,40.123,98.75088,63.300388151527855,64.63230652910728,null,300.343,null,84.1756,40.123,100.02,40.123,300.343,4.0929,88.1815,100.02,86.0682),(193,null,false,-17,-110,-32768,22171,-2147483648,147483648,5467206147096318931,20240803,72.185394,72.190216,23.628436073359932,72.51017191732922,40.123,43.0124,300.343,25.1762,null,300.343,300.343,28.1133,54.0435,55.0702,23.0567,100.02),(194,false,false,55,0,18123,7302,-1962777107,147483648,19480,1,null,100.02,45.58539012856048,59.57771833366333,82.1464,100.02,43.1109,300.343,100.02,39.1988,2.0388,45.0857,null,25.1965,null,7.1684),(195,false,false,-128,1,14187,245,1996009108,32679,3176594,-1361625,300.343,85.83407,49.41130397186706,78.85429657486976,100.02,85.1590,40.123,33.0503,null,100.02,null,100.02,300.343,100.02,41.0914,82.0151),(196,null,false,54,65,-32768,1256,-2147483648,-1188656418,null,245,10.657936,87.426674,300.343,95.2127316395285,42.0167,1.0607,19.1865,40.123,null,80.0223,null,2.0212,null,3.0599,300.343,300.343),(197,null,true,9,-128,245,-22687,-702035247,156461619,1,32679,16.704918,27.144121,43.0076514716217,79.53961659623093,18.0086,90.1809,57.0672,100.02,300.343,40.123,40.123,300.343,7.1142,100.02,3.1152,100.02),(198,null,false,20,-91,8077,25455,-2147483648,771014562,6033356893662555686,2244,300.343,87.73025,7.1216681183492145,51.66517820236684,null,100.02,40.123,52.1484,26.1710,16.0758,null,300.343,76.0330,77.1226,100.02,40.123),(199,true,true,-78,-4,-20877,10649,339605146,-2132984156,245,20240803,82.54591,300.343,null,17.455725893098002,65.0025,65.1163,null,100.02,null,60.0496,4.0522,40.123,93.1273,100.02,31.1751,100.02); + """ + + qt_sql """ select LAST_VALUE(col_tinyint_undef_signed_not_null) over (partition by col_double_undef_signed_not_null, col_int_undef_signed, (col_float_undef_signed_not_null - col_int_undef_signed), round_bankers(col_int_undef_signed) order by pk rows between unbounded preceding and 4 preceding) AS col_alias56089 from table_200_undef_partitions2_keys3_properties4_distributed_by53 order by col_alias56089; """ } diff --git a/regression-test/suites/nereids_syntax_p1/mv/aggregate/agg_sync_mv.groovy b/regression-test/suites/nereids_syntax_p1/mv/aggregate/agg_sync_mv.groovy new file mode 100644 index 00000000000000..b0f384c472c848 --- /dev/null +++ b/regression-test/suites/nereids_syntax_p1/mv/aggregate/agg_sync_mv.groovy @@ -0,0 +1,552 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("agg_sync_mv") { + sql """ use regression_test_nereids_syntax_p0_mv """ + sql """ SET enable_nereids_planner=true """ + sql """ SET enable_fallback_to_original_planner=false """ + sql """ analyze table agg_mv_test with sync""" + sql """ set enable_stats=false""" + + qt_select_any_value """select id, any_value(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync1 on agg_mv_test;""" + createMV("""create materialized view mv_sync1 as select id, any_value(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, any_value(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync1)" + } + qt_select_any_value_mv """select id, any_value(kint) from agg_mv_test group by id order by id;""" + + // sum_foreach is not supported in old planner + // qt_select_sum_foreach """select id, sum_foreach(kaint) from agg_mv_test group by id order by id;""" + // sql """drop materialized view if exists mv_sync2 on agg_mv_test;""" + // createMV("""create materialized view mv_sync2 as select id, sum_foreach(kaint) from agg_mv_test group by id order by id;""") + // explain { + // sql("select id, sum_foreach(kaint) from agg_mv_test group by id order by id;") + // contains "(mv_sync2)" + // } + // qt_select_sum_foreach_mv """select id, sum_foreach(kaint) from agg_mv_test group by id order by id;""" + + qt_select_approx_count_distinct """select id, approx_count_distinct(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync3 on agg_mv_test;""" + createMV("""create materialized view mv_sync3 as select id, approx_count_distinct(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, approx_count_distinct(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync3)" + } + qt_select_approx_count_distinct_mv """select id, approx_count_distinct(kint) from agg_mv_test group by id order by id;""" + + qt_select_collect_set """select id, collect_set(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync4 on agg_mv_test;""" + createMV("""create materialized view mv_sync4 as select id, collect_set(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, collect_set(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync4)" + } + qt_select_collect_set_mv """select id, collect_set(kint) from agg_mv_test group by id order by id;""" + + qt_select_collect_list """select id, collect_list(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync5 on agg_mv_test;""" + createMV("""create materialized view mv_sync5 as select id, collect_list(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, collect_list(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync5)" + } + qt_select_collect_list_mv """select id, collect_list(kint) from agg_mv_test group by id order by id;""" + + qt_select_corr """select id, corr(kint, kbint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync6 on agg_mv_test;""" + createMV("""create materialized view mv_sync6 as select id, corr(kint, kbint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, corr(kint, kbint) from agg_mv_test group by id order by id;") + contains "(mv_sync6)" + } + 
qt_select_corr_mv """select id, corr(kint, kbint) from agg_mv_test group by id order by id;""" + + qt_select_percentile_array """select id, percentile_array(kint, [0.5,0.55,0.805]) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync7 on agg_mv_test;""" + createMV("""create materialized view mv_sync7 as select id, percentile_array(kint, [0.5,0.55,0.805]) from agg_mv_test group by id order by id;""") + explain { + sql("select id, percentile_array(kint, [0.5,0.55,0.805]) from agg_mv_test group by id order by id;") + contains "(mv_sync7)" + } + qt_select_percentile_array_mv """select id, percentile_array(kint, [0.5,0.55,0.805]) from agg_mv_test group by id order by id;""" + + qt_select_quantile_union """select id, quantile_union(to_quantile_state(kbint, 2048)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync8 on agg_mv_test;""" + createMV("""create materialized view mv_sync8 as select id, quantile_union(to_quantile_state(kbint, 2048)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, quantile_union(to_quantile_state(kbint, 2048)) from agg_mv_test group by id order by id;") + contains "(mv_sync8)" + } + qt_select_quantile_union_mv """select id, quantile_union(to_quantile_state(kbint, 2048)) from agg_mv_test group by id order by id;""" + + qt_select_count_by_enum """select id, count_by_enum(kstr) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync9 on agg_mv_test;""" + createMV("""create materialized view mv_sync9 as select id, count_by_enum(kstr) from agg_mv_test group by id order by id;""") + explain { + sql("select id, count_by_enum(kstr) from agg_mv_test group by id order by id;") + contains "(mv_sync9)" + } + qt_select_count_by_enum_mv """select id, count_by_enum(kstr) from agg_mv_test group by id order by id;""" + + qt_select_avg_weighted """select id, avg_weighted(ktint, kdbl) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync10 on agg_mv_test;""" + createMV("""create materialized view mv_sync10 as select id, avg_weighted(ktint, kdbl) from agg_mv_test group by id order by id;""") + explain { + sql("select id, avg_weighted(ktint, kdbl) from agg_mv_test group by id order by id;") + contains "(mv_sync10)" + } + qt_select_avg_weighted_mv """select id, avg_weighted(ktint, kdbl) from agg_mv_test group by id order by id;""" + + qt_select_bitmap_intersect """select id, bitmap_intersect(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync11 on agg_mv_test;""" + createMV("""create materialized view mv_sync11 as select id, bitmap_intersect(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, bitmap_intersect(bitmap_hash(kbint)) from agg_mv_test group by id order by id;") + contains "(mv_sync11)" + } + qt_select_bitmap_intersect_mv """select id, bitmap_intersect(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + + qt_select_bitmap_agg """select id, bitmap_agg(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync12 on agg_mv_test;""" + createMV("""create materialized view mv_sync12 as select id, bitmap_agg(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, bitmap_agg(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync12)" + } + qt_select_bitmap_agg_mv """select id, bitmap_agg(kint) from 
agg_mv_test group by id order by id;""" + + qt_select_bitmap_union """select id, bitmap_union(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync13 on agg_mv_test;""" + createMV("""create materialized view mv_sync13 as select id, bitmap_union(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, bitmap_union(bitmap_hash(kbint)) from agg_mv_test group by id order by id;") + contains "(mv_sync13)" + } + qt_select_bitmap_union_mv """select id, bitmap_union(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + + qt_select_bitmap_union_count """select id, bitmap_union_count(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync14 on agg_mv_test;""" + createMV("""create materialized view mv_sync14 as select id, bitmap_union_count(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, bitmap_union_count(bitmap_hash(kbint)) from agg_mv_test group by id order by id;") + contains "(mv_sync14)" + } + qt_select_bitmap_union_count_mv """select id, bitmap_union_count(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + + qt_select_bitmap_union_int """select id, bitmap_union_int(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync15 on agg_mv_test;""" + createMV("""create materialized view mv_sync15 as select id, bitmap_union_int(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, bitmap_union_int(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync15)" + } + qt_select_bitmap_union_int_mv """select id, bitmap_union_int(kint) from agg_mv_test group by id order by id;""" + + qt_select_group_array_intersect """select id, group_array_intersect(kaint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync16 on agg_mv_test;""" + createMV("""create materialized view mv_sync16 as select id, group_array_intersect(kaint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, group_array_intersect(kaint) from agg_mv_test group by id order by id;") + contains "(mv_sync16)" + } + qt_select_group_array_intersect_mv """select id, group_array_intersect(kaint) from agg_mv_test group by id order by id;""" + + qt_select_group_bit_and """select id, group_bit_and(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync17 on agg_mv_test;""" + createMV("""create materialized view mv_sync17 as select id, group_bit_and(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, group_bit_and(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync17)" + } + qt_select_group_bit_and_mv """select id, group_bit_and(kint) from agg_mv_test group by id order by id;""" + + qt_select_group_bit_or """select id, group_bit_or(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync18 on agg_mv_test;""" + createMV("""create materialized view mv_sync18 as select id, group_bit_or(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, group_bit_or(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync18)" + } + qt_select_group_bit_or_mv """select id, group_bit_or(kint) from agg_mv_test group by id order by id;""" + + qt_select_group_bit_xor """select id, group_bit_xor(kint) from agg_mv_test group 
by id order by id;""" + sql """drop materialized view if exists mv_sync19 on agg_mv_test;""" + createMV("""create materialized view mv_sync19 as select id, group_bit_xor(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, group_bit_xor(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync19)" + } + qt_select_group_bit_xor_mv """select id, group_bit_xor(kint) from agg_mv_test group by id order by id;""" + + qt_select_group_bitmap_xor """select id, group_bitmap_xor(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync20 on agg_mv_test;""" + createMV("""create materialized view mv_sync20 as select id, group_bitmap_xor(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, group_bitmap_xor(bitmap_hash(kbint)) from agg_mv_test group by id order by id;") + contains "(mv_sync20)" + } + qt_select_group_bitmap_xor_mv """select id, group_bitmap_xor(bitmap_hash(kbint)) from agg_mv_test group by id order by id;""" + + qt_select_hll_union_agg """select id, hll_union_agg(hll_hash(kbint)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync21 on agg_mv_test;""" + createMV("""create materialized view mv_sync21 as select id, hll_union_agg(hll_hash(kbint)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, hll_union_agg(hll_hash(kbint)) from agg_mv_test group by id order by id;") + contains "(mv_sync21)" + } + qt_select_hll_union_agg_mv """select id, hll_union_agg(hll_hash(kbint)) from agg_mv_test group by id order by id;""" + + qt_select_hll_union """select id, hll_union(hll_hash(kbint)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync22 on agg_mv_test;""" + createMV("""create materialized view mv_sync22 as select id, hll_union(hll_hash(kbint)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, hll_union(hll_hash(kbint)) from agg_mv_test group by id order by id;") + contains "(mv_sync22)" + } + qt_select_hll_union_mv """select id, hll_union(hll_hash(kbint)) from agg_mv_test group by id order by id;""" + + qt_select_intersect_count """select id, intersect_count(bitmap_hash(kbint), kint, 3, 4) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync23 on agg_mv_test;""" + createMV("""create materialized view mv_sync23 as select id, intersect_count(bitmap_hash(kbint), kint, 3, 4) from agg_mv_test group by id order by id;""") + explain { + sql("select id, intersect_count(bitmap_hash(kbint), kint, 3, 4) from agg_mv_test group by id order by id;") + contains "(mv_sync23)" + } + qt_select_intersect_count_mv """select id, intersect_count(bitmap_hash(kbint), kint, 3, 4) from agg_mv_test group by id order by id;""" + + qt_select_group_concat """select id, group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync24 on agg_mv_test;""" + createMV("""create materialized view mv_sync24 as select id, group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;") + contains "(mv_sync24)" + } + qt_select_group_concat_mv """select id, group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;""" + + qt_select_multi_distinct_group_concat """select id, 
multi_distinct_group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync25 on agg_mv_test;""" + createMV("""create materialized view mv_sync25 as select id, multi_distinct_group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;""") + explain { + sql("select id, multi_distinct_group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;") + contains "(mv_sync25)" + } + qt_select_multi_distinct_group_concat_mv """select id, multi_distinct_group_concat(cast(abs(kint) as varchar)) from agg_mv_test group by id order by id;""" + + qt_select_multi_distinct_sum0 """select id, multi_distinct_sum0(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync26 on agg_mv_test;""" + createMV("""create materialized view mv_sync26 as select id, multi_distinct_sum0(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, multi_distinct_sum0(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync26)" + } + qt_select_multi_distinct_sum0_mv """select id, multi_distinct_sum0(kint) from agg_mv_test group by id order by id;""" + + qt_select_multi_distinct_sum """select id, multi_distinct_sum(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync27 on agg_mv_test;""" + createMV("""create materialized view mv_sync27 as select id, multi_distinct_sum(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, multi_distinct_sum(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync27)" + } + qt_select_multi_distinct_sum_mv """select id, multi_distinct_sum(kint) from agg_mv_test group by id order by id;""" + + + qt_select_histogram """select id, histogram(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync28 on agg_mv_test;""" + createMV("""create materialized view mv_sync28 as select id, histogram(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, histogram(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync28)" + } + qt_select_histogram_mv """select id, histogram(kint) from agg_mv_test group by id order by id;""" + + qt_select_max_by """select id, max_by(kint, kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync29 on agg_mv_test;""" + createMV("""create materialized view mv_sync29 as select id, max_by(kint, kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, max_by(kint, kint) from agg_mv_test group by id order by id;") + contains "(mv_sync29)" + } + qt_select_max_by_mv """select id, max_by(kint, kint) from agg_mv_test group by id order by id;""" + + qt_select_min_by """select id, min_by(kint, kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync30 on agg_mv_test;""" + createMV("""create materialized view mv_sync30 as select id, min_by(kint, kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, min_by(kint, kint) from agg_mv_test group by id order by id;") + contains "(mv_sync30)" + } + qt_select_min_by_mv """select id, min_by(kint, kint) from agg_mv_test group by id order by id;""" + + qt_select_multi_distinct_count """select id, multi_distinct_count(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync31 on agg_mv_test;""" + 
createMV("""create materialized view mv_sync31 as select id, multi_distinct_count(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, multi_distinct_count(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync31)" + } + qt_select_multi_distinct_count_mv """select id, multi_distinct_count(kint) from agg_mv_test group by id order by id;""" + + qt_select_ndv """select id, ndv(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync32 on agg_mv_test;""" + createMV("""create materialized view mv_sync32 as select id, ndv(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, ndv(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync32)" + } + qt_select_ndv_mv """select id, ndv(kint) from agg_mv_test group by id order by id;""" + + qt_select_covar """select id, covar(kint, kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync33 on agg_mv_test;""" + createMV("""create materialized view mv_sync33 as select id, covar(kint, kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, covar(kint, kint) from agg_mv_test group by id order by id;") + contains "(mv_sync33)" + } + qt_select_covar_mv """select id, covar(kint, kint) from agg_mv_test group by id order by id;""" + + qt_select_covar_samp """select id, covar_samp(kint, kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync34 on agg_mv_test;""" + createMV("""create materialized view mv_sync34 as select id, covar_samp(kint, kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, covar_samp(kint, kint) from agg_mv_test group by id order by id;") + contains "(mv_sync34)" + } + qt_select_covar_samp_mv """select id, covar_samp(kint, kint) from agg_mv_test group by id order by id;""" + + qt_select_percentile """select id, percentile(kbint, 0.6) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync35 on agg_mv_test;""" + createMV("""create materialized view mv_sync35 as select id, percentile(kbint, 0.6) from agg_mv_test group by id order by id;""") + explain { + sql("select id, percentile(kbint, 0.6) from agg_mv_test group by id order by id;") + contains "(mv_sync35)" + } + qt_select_percentile_mv """select id, percentile(kbint, 0.6) from agg_mv_test group by id order by id;""" + + qt_select_percentile_approx """select id, percentile_approx(kbint, 0.6) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync36 on agg_mv_test;""" + createMV("""create materialized view mv_sync36 as select id, percentile_approx(kbint, 0.6) from agg_mv_test group by id order by id;""") + explain { + sql("select id, percentile_approx(kbint, 0.6) from agg_mv_test group by id order by id;") + contains "(mv_sync36)" + } + qt_select_percentile_approx_mv """select id, percentile_approx(kbint, 0.6) from agg_mv_test group by id order by id;""" + + // percentile_approx_weighted is not supported in old planner + // qt_select_percentile_approx_weighted """select id, percentile_approx_weighted(kint, kbint, 0.6) from agg_mv_test group by id order by id;""" + // sql """drop materialized view if exists mv_sync37 on agg_mv_test;""" + // createMV("""create materialized view mv_sync37 as select id, percentile_approx_weighted(kint, kbint, 0.6) from agg_mv_test group by id order by id;""") + // explain { + // sql("select id, 
percentile_approx_weighted(kint, kbint, 0.6) from agg_mv_test group by id order by id;") + // contains "(mv_sync37)" + // } + // qt_select_percentile_approx_weighted_mv """select id, percentile_approx_weighted(kint, kbint, 0.6) from agg_mv_test group by id order by id;""" + + qt_select_sequence_count """select id, sequence_count('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync38 on agg_mv_test;""" + createMV("""create materialized view mv_sync38 as select id, sequence_count('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;""") + explain { + sql("select id, sequence_count('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;") + contains "(mv_sync38)" + } + qt_select_sequence_count_mv """select id, sequence_count('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" + + qt_select_sequence_match """select id, sequence_match('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync39 on agg_mv_test;""" + createMV("""create materialized view mv_sync39 as select id, sequence_match('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;""") + explain { + sql("select id, sequence_match('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;") + contains "(mv_sync39)" + } + qt_select_sequence_match_mv """select id, sequence_match('(?1)(?2)', kdtv2, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" + + qt_select_stddev """select id, stddev(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync40 on agg_mv_test;""" + createMV("""create materialized view mv_sync40 as select id, stddev(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, stddev(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync40)" + } + qt_select_stddev_mv """select id, stddev(kint) from agg_mv_test group by id order by id;""" + + qt_select_stddev_pop """select id, stddev_pop(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync41 on agg_mv_test;""" + createMV("""create materialized view mv_sync41 as select id, stddev_pop(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, stddev_pop(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync41)" + } + qt_select_stddev_pop_mv """select id, stddev_pop(kint) from agg_mv_test group by id order by id;""" + + qt_select_stddev_samp """select id, stddev_samp(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync42 on agg_mv_test;""" + createMV("""create materialized view mv_sync42 as select id, stddev_samp(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, stddev_samp(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync42)" + } + qt_select_stddev_samp_mv """select id, stddev_samp(kint) from agg_mv_test group by id order by id;""" + + qt_select_sum0 """select id, sum0(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync43 on agg_mv_test;""" + createMV("""create materialized view mv_sync43 as select id, sum0(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, sum0(kint) from agg_mv_test group by id order by id;") + contains 
"(mv_sync43)" + } + qt_select_sum0_mv """select id, sum0(kint) from agg_mv_test group by id order by id;""" + + qt_select_topn """select id, topn(kvchrs1, 3) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync44 on agg_mv_test;""" + createMV("""create materialized view mv_sync44 as select id, topn(kvchrs1, 3) from agg_mv_test group by id order by id;""") + explain { + sql("select id, topn(kvchrs1, 3) from agg_mv_test group by id order by id;") + contains "(mv_sync44)" + } + qt_select_topn_mv """select id, topn(kvchrs1, 3) from agg_mv_test group by id order by id;""" + + qt_select_topn_array """select id, topn_array(kvchrs1, 3) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync45 on agg_mv_test;""" + createMV("""create materialized view mv_sync45 as select id, topn_array(kvchrs1, 3) from agg_mv_test group by id order by id;""") + explain { + sql("select id, topn_array(kvchrs1, 3) from agg_mv_test group by id order by id;") + contains "(mv_sync45)" + } + qt_select_topn_array_mv """select id, topn_array(kvchrs1, 3) from agg_mv_test group by id order by id;""" + + qt_select_topn_weighted """select id, topn_weighted(kvchrs1, ktint, 3) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync46 on agg_mv_test;""" + createMV("""create materialized view mv_sync46 as select id, topn_weighted(kvchrs1, ktint, 3) from agg_mv_test group by id order by id;""") + explain { + sql("select id, topn_weighted(kvchrs1, ktint, 3) from agg_mv_test group by id order by id;") + contains "(mv_sync46)" + } + qt_select_topn_weighted_mv """select id, topn_weighted(kvchrs1, ktint, 3) from agg_mv_test group by id order by id;""" + + qt_select_variance """select id, variance(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync47 on agg_mv_test;""" + createMV("""create materialized view mv_sync47 as select id, variance(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, variance(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync47)" + } + qt_select_variance_mv """select id, variance(kint) from agg_mv_test group by id order by id;""" + + qt_select_var_pop """select id, var_pop(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync48 on agg_mv_test;""" + createMV("""create materialized view mv_sync48 as select id, var_pop(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, var_pop(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync47)" + } + qt_select_var_pop_mv """select id, var_pop(kint) from agg_mv_test group by id order by id;""" + + qt_select_variance_samp """select id, variance_samp(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync49 on agg_mv_test;""" + createMV("""create materialized view mv_sync49 as select id, variance_samp(kint) from agg_mv_test group by id order by id;""") + explain { + sql("select id, variance_samp(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync49)" + } + qt_select_variance_samp_mv """select id, variance_samp(kint) from agg_mv_test group by id order by id;""" + + qt_select_var_samp """select id, var_samp(kint) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync50 on agg_mv_test;""" + createMV("""create materialized view mv_sync50 as select id, var_samp(kint) from 
agg_mv_test group by id order by id;""") + explain { + sql("select id, var_samp(kint) from agg_mv_test group by id order by id;") + contains "(mv_sync50)" + } + qt_select_var_samp_mv """select id, var_samp(kint) from agg_mv_test group by id order by id;""" + + qt_select_window_funnel """select id, window_funnel(3600 * 3, 'default', kdtm, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync51 on agg_mv_test;""" + createMV("""create materialized view mv_sync51 as select id, window_funnel(3600 * 3, 'default', kdtm, kint = 1, kint = 2) from agg_mv_test group by id order by id;""") + explain { + sql("select id, window_funnel(3600 * 3, 'default', kdtm, kint = 1, kint = 2) from agg_mv_test group by id order by id;") + contains "(mv_sync51)" + } + qt_select_window_funnel_mv """select id, window_funnel(3600 * 3, 'default', kdtm, kint = 1, kint = 2) from agg_mv_test group by id order by id;""" + + // map_agg is not supported yet + // qt_select_map_agg """select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""" + // sql """drop materialized view if exists mv_sync52 on agg_mv_test;""" + // createMV("""create materialized view mv_sync52 as select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""") + // explain { + // sql("select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;") + // contains "(mv_sync52)" + // } + // qt_select_map_agg_mv """select id, map_agg(kint, kstr) from agg_mv_test group by id order by id;""" + + // array_agg is not supported yet + // qt_select_array_agg """select id, array_agg(kstr) from agg_mv_test group by id order by id;""" + // sql """drop materialized view if exists mv_sync53 on agg_mv_test;""" + // createMV("""create materialized view mv_sync53 as select id, array_agg(kstr) from agg_mv_test group by id order by id;""") + // explain { + // sql("select id, array_agg(kstr) from agg_mv_test group by id order by id;") + // contains "(mv_sync53)" + // } + // qt_select_array_agg_mv """select id, array_agg(kstr) from agg_mv_test group by id order by id;""" + + qt_select_retention """select id, retention(kdtm = '2012-03-11', kdtm = '2012-03-12') from agg_mv_test group by id order by id;""" + sql """drop materialized view if exists mv_sync54 on agg_mv_test;""" + createMV("""create materialized view mv_sync54 as select id, retention(kdtm = '2012-03-11', kdtm = '2012-03-12') from agg_mv_test group by id order by id;""") + explain { + sql("select id, retention(kdtm = '2012-03-11', kdtm = '2012-03-12') from agg_mv_test group by id order by id;") + contains "(mv_sync54)" + } + qt_select_retention_mv """select id, retention(kdtm = '2012-03-11', kdtm = '2012-03-12') from agg_mv_test group by id order by id;""" + + + streamLoad { + table "agg_mv_test" + db "regression_test_nereids_syntax_p0_mv" + set 'column_separator', ';' + set 'columns', ''' + id, kbool, ktint, ksint, kint, kbint, klint, kfloat, kdbl, kdcmls1, kdcmls2, kdcmls3, + kdcmlv3s1, kdcmlv3s2, kdcmlv3s3, kchrs1, kchrs2, kchrs3, kvchrs1, kvchrs2, kvchrs3, kstr, + kdt, kdtv2, kdtm, kdtmv2s1, kdtmv2s2, kdtmv2s3, kabool, katint, kasint, kaint, + kabint, kalint, kafloat, kadbl, kadt, kadtm, kadtv2, kadtmv2, kachr, kavchr, kastr, kadcml, + st_point_str, st_point_vc, x_lng, x_lat, y_lng, y_lat, z_lng, z_lat, radius, linestring_wkt, polygon_wkt, + km_bool_tint, km_tint_tint, km_sint_tint, km_int_tint, km_bint_tint, km_lint_tint, km_float_tint, + km_dbl_tint, km_dcml_tint, km_chr_tint, km_vchr_tint, km_str_tint, 
km_date_tint, km_dtm_tint, + km_tint_bool, km_int_int, km_tint_sint, km_tint_int, km_tint_bint, km_tint_lint, km_tint_float, + km_tint_dbl, km_tint_dcml, km_tint_chr, km_tint_vchr, km_tint_str, km_tint_date, km_tint_dtm, kjson, kstruct + ''' + file "../agg_mv_test.dat" + } + + + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + sql "insert into agg_mv_test select * from agg_mv_test;" + + sql "set parallel_pipeline_task_num=1" + qt_test "select kbint, map_agg(id, kstr) from agg_mv_test group by kbint order by kbint;" +} diff --git a/regression-test/suites/nereids_syntax_p1/mv/load.groovy b/regression-test/suites/nereids_syntax_p1/mv/load.groovy new file mode 100644 index 00000000000000..23b625a127f90d --- /dev/null +++ b/regression-test/suites/nereids_syntax_p1/mv/load.groovy @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("load") { + sql "drop table if exists agg_mv_test" + sql """ + CREATE TABLE IF NOT EXISTS `agg_mv_test` ( + `id` int null, + `kbool` boolean null, + `ktint` tinyint(4) null, + `ksint` smallint(6) null, + `kint` int(11) null, + `kbint` bigint(20) null, + `klint` largeint(40) null, + `kfloat` float null, + `kdbl` double null, + `kdcmls1` decimal(9, 3) null, + `kdcmls2` decimal(15, 5) null, + `kdcmls3` decimal(27, 9) null, + `kdcmlv3s1` decimalv3(9, 3) null, + `kdcmlv3s2` decimalv3(15, 5) null, + `kdcmlv3s3` decimalv3(27, 9) null, + `kchrs1` char(10) null, + `kchrs2` char(20) null, + `kchrs3` char(50) null, + `kvchrs1` varchar(10) null, + `kvchrs2` varchar(20) null, + `kvchrs3` varchar(50) null, + `kstr` string null, + `kdt` date null, + `kdtv2` datev2 null, + `kdtm` datetime null, + `kdtmv2s1` datetimev2(0) null, + `kdtmv2s2` datetimev2(4) null, + `kdtmv2s3` datetimev2(6) null, + `kabool` array null, + `katint` array null, + `kasint` array null, + `kaint` array null, + `kabint` array null, + `kalint` array null, + `kafloat` array null, + `kadbl` array null, + `kadt` array null, + `kadtm` array null, + `kadtv2` array null, + `kadtmv2` array null, + `kachr` array null, + `kavchr` array null, + `kastr` array null, + `kadcml` array null, + `st_point_str` string null, + `st_point_vc` varchar(50) null, + `x_lng` double null, + `x_lat` double null, + `y_lng` double null, + `y_lat` double null, + `z_lng` double null, + `z_lat` double null, + `radius` double null, + `linestring_wkt` varchar(50) null, + `polygon_wkt` varchar(50) null, + `km_bool_tint` map null, + `km_tint_tint` map null, + `km_sint_tint` map null, + `km_int_tint` map null, + `km_bint_tint` map null, + `km_lint_tint` map null, + `km_float_tint` map null, + `km_dbl_tint` map null, + `km_dcml_tint` map null, + `km_chr_tint` map null, + `km_vchr_tint` map null, + `km_str_tint` map null, + `km_date_tint` map null, + `km_dtm_tint` map null, + `km_tint_bool` map null, + `km_int_int` map null, + `km_tint_sint` map null, + `km_tint_int` map null, + `km_tint_bint` map null, + `km_tint_lint` map null, + `km_tint_float` map null, + `km_tint_dbl` map null, + `km_tint_dcml` map null, + `km_tint_chr` map null, + `km_tint_vchr` map null, + `km_tint_str` map null, + `km_tint_date` map null, + `km_tint_dtm` map null, + `kjson` JSON null, + `kstruct` STRUCT null + ) engine=olap + DISTRIBUTED BY HASH(`id`) BUCKETS 4 + properties("replication_num" = "1") + """ + + streamLoad { + table "agg_mv_test" + db "regression_test_nereids_syntax_p0_mv" + set 'column_separator', ';' + set 'columns', ''' + id, kbool, ktint, ksint, kint, kbint, klint, kfloat, kdbl, kdcmls1, kdcmls2, kdcmls3, + kdcmlv3s1, kdcmlv3s2, kdcmlv3s3, kchrs1, kchrs2, kchrs3, kvchrs1, kvchrs2, kvchrs3, kstr, + kdt, kdtv2, kdtm, kdtmv2s1, kdtmv2s2, kdtmv2s3, kabool, katint, kasint, kaint, + kabint, kalint, kafloat, kadbl, kadt, kadtm, kadtv2, kadtmv2, kachr, kavchr, kastr, kadcml, + st_point_str, st_point_vc, x_lng, x_lat, y_lng, y_lat, z_lng, z_lat, radius, linestring_wkt, polygon_wkt, + km_bool_tint, km_tint_tint, km_sint_tint, km_int_tint, km_bint_tint, km_lint_tint, km_float_tint, + km_dbl_tint, km_dcml_tint, km_chr_tint, km_vchr_tint, km_str_tint, km_date_tint, km_dtm_tint, + km_tint_bool, km_int_int, km_tint_sint, km_tint_int, km_tint_bint, km_tint_lint, km_tint_float, + km_tint_dbl, km_tint_dcml, km_tint_chr, km_tint_vchr, km_tint_str, km_tint_date, km_tint_dtm, kjson, kstruct + ''' + file "agg_mv_test.dat" + } +} diff --git a/regression-test/suites/node_p0/test_backend.groovy 
b/regression-test/suites/node_p0/test_backend.groovy index cce111b0a19076..5e68e5019acede 100644 --- a/regression-test/suites/node_p0/test_backend.groovy +++ b/regression-test/suites/node_p0/test_backend.groovy @@ -24,23 +24,28 @@ suite("test_backend", "nonConcurrent") { logger.info("result:${result}") sql """ALTER SYSTEM ADD BACKEND "${address}:${notExistPort}";""" + waitAddBeFinished(address, notExistPort) result = sql """SHOW BACKENDS;""" logger.info("result:${result}") - sql """ALTER SYSTEM MODIFY BACKEND "${address}:${notExistPort}" SET ("disable_query" = "true"); """ - sql """ALTER SYSTEM MODIFY BACKEND "${address}:${notExistPort}" SET ("disable_load" = "true"); """ + if (!isCloudMode()) { + sql """ALTER SYSTEM MODIFY BACKEND "${address}:${notExistPort}" SET ("disable_query" = "true"); """ + sql """ALTER SYSTEM MODIFY BACKEND "${address}:${notExistPort}" SET ("disable_load" = "true"); """ + } result = sql """SHOW BACKENDS;""" logger.info("result:${result}") sql """ALTER SYSTEM DROPP BACKEND "${address}:${notExistPort}";""" + waitDropBeFinished(address, notExistPort) result = sql """SHOW BACKENDS;""" logger.info("result:${result}") } - if (context.config.jdbcUser.equals("root")) { + // Cancel decommission backend is not supported in cloud mode. + if (context.config.jdbcUser.equals("root") && !isCloudMode()) { def beId1 = null try { GetDebugPoint().enableDebugPointForAllFEs("SystemHandler.decommission_no_check_replica_num"); diff --git a/regression-test/suites/node_p0/test_frontend.groovy b/regression-test/suites/node_p0/test_frontend.groovy index c861b0e6b80b90..4478a1d3709c28 100644 --- a/regression-test/suites/node_p0/test_frontend.groovy +++ b/regression-test/suites/node_p0/test_frontend.groovy @@ -24,18 +24,22 @@ suite("test_frontend") { logger.debug("result:${result}") sql """ALTER SYSTEM ADD FOLLOWER "${address}:${notExistPort}";""" + waitAddFeFinished(address, notExistPort); result = sql """SHOW FRONTENDS;""" logger.debug("result:${result}") sql """ALTER SYSTEM DROP FOLLOWER "${address}:${notExistPort}";""" + waitDropFeFinished(address, notExistPort); result = sql """SHOW FRONTENDS;""" logger.debug("result:${result}") sql """ALTER SYSTEM ADD OBSERVER "${address}:${notExistPort}";""" + waitAddFeFinished(address, notExistPort); result = sql """SHOW FRONTENDS;""" logger.debug("result:${result}") sql """ALTER SYSTEM DROP OBSERVER "${address}:${notExistPort}";""" + waitDropFeFinished(address, notExistPort); result = sql """SHOW FRONTENDS;""" logger.debug("result:${result}") } diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_dynamic.groovy b/regression-test/suites/partition_p0/auto_partition/test_auto_dynamic.groovy index c5fa8a736dbf74..a9e8a134fee086 100644 --- a/regression-test/suites/partition_p0/auto_partition/test_auto_dynamic.groovy +++ b/regression-test/suites/partition_p0/auto_partition/test_auto_dynamic.groovy @@ -1,5 +1,3 @@ - - // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. 
See the NOTICE file // distributed with this work for additional information @@ -19,6 +17,7 @@ suite("test_auto_dynamic", "nonConcurrent") { // PROHIBIT different timeunit of interval when use both auto & dynamic partition + sql " drop table if exists tbl3 " test{ sql """ CREATE TABLE tbl3 @@ -117,7 +116,23 @@ suite("test_auto_dynamic", "nonConcurrent") { part_result = sql " show partitions from auto_dynamic " assertEquals(part_result.size, 1) - sql " insert into auto_dynamic values ('2024-01-01'), ('2900-01-01'), ('1900-01-01'), ('3000-01-01'); " + def skip_test = false + test { + sql " insert into auto_dynamic values ('2024-01-01'), ('2900-01-01'), ('1900-01-01'), ('3000-01-01'); " + check { result, exception, startTime, endTime -> + if (exception != null) { + // the partition of 1900-01-01 directly been recovered before the insert txn finished. let it success + part_result = sql " show partitions from auto_dynamic " + log.info("${part_result}".toString()) + assertTrue(exception.getMessage().contains("get partition p19000101000000 failed")) + skip_test = true + } + } + } + if (skip_test) { + return true + } + sql """ admin set frontend config ('dynamic_partition_check_interval_seconds' = '1') """ sleep(2000) part_result = sql " show partitions from auto_dynamic " diff --git a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy index ec626eaa6915d2..a4b3eb661768dd 100644 --- a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy +++ b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy @@ -88,10 +88,7 @@ suite("test_dynamic_partition") { assertEquals(result.get(0).Buckets.toInteger(), 3) sql "drop table dy_par_bucket_set_by_distribution" sql "drop table if exists dy_par_bad" - def isCloudMode = { - def ret = sql_return_maparray """show backends""" - ret.Tag[0].contains("cloud_cluster_name") - } + def isCloudMode = isCloudMode() // not support tag in cloud mode if (!isCloudMode) { diff --git a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy index c52e5897aa0880..51ddf0ef853d3d 100644 --- a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy +++ b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy @@ -39,7 +39,7 @@ suite("test_dynamic_partition_mod_distribution_key", "docker") { v INT ${aggType} ) ${key} PARTITION BY RANGE(k1) () - DISTRIBUTED BY HASH(k1) BUCKETS 1 + DISTRIBUTED BY HASH(k1, k2) BUCKETS 1 PROPERTIES ( "dynamic_partition.enable"="true", "dynamic_partition.end"="3", diff --git a/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy b/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy index b403b3a2f51ee2..33b9e1b1c6dbc5 100644 --- a/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy +++ b/regression-test/suites/prepared_stmt_p0/prepared_stmt.groovy @@ -151,8 +151,6 @@ suite("test_prepared_stmt", "nonConcurrent") { sql """insert into mytable1 values(2,1,'user1',null);""" - // sql "set experimental_enable_nereids_planner = false" - stmt_read = prepareStatement "SELECT *, ? FROM (select *, ? 
from mytable1 where pv is null) AS `SpotfireCustomQuery1` WHERE 1 = 1" assertEquals(com.mysql.cj.jdbc.ServerPreparedStatement, stmt_read.class) stmt_read.setString(1, "xxxlalala") @@ -163,8 +161,6 @@ suite("test_prepared_stmt", "nonConcurrent") { qe_select7 stmt_read // stmt_read.close() - // sql "set experimental_enable_nereids_planner = true" - stmt_read.setString(1, "xxxlalala") stmt_read.setDouble(2, 1234.1111) qe_select6_1 stmt_read diff --git a/regression-test/suites/query_p0/aggregate/array_agg.groovy b/regression-test/suites/query_p0/aggregate/array_agg.groovy index 1484125d0da6cc..217285b572c538 100644 --- a/regression-test/suites/query_p0/aggregate/array_agg.groovy +++ b/regression-test/suites/query_p0/aggregate/array_agg.groovy @@ -20,6 +20,8 @@ suite("array_agg") { sql "DROP TABLE IF EXISTS `test_array_agg1`;" sql "DROP TABLE IF EXISTS `test_array_agg_int`;" sql "DROP TABLE IF EXISTS `test_array_agg_decimal`;" + sql "DROP TABLE IF EXISTS `test_array_agg_complex`;" + sql """ CREATE TABLE `test_array_agg` ( `id` int(11) NOT NULL, @@ -249,6 +251,31 @@ suite("array_agg") { SELECT count(value_field), size(array_agg(label_name)) FROM `test_array_agg` GROUP BY value_field order by value_field; """ + // only support nereids + sql "SET enable_nereids_planner=true;" + sql "SET enable_fallback_to_original_planner=false;" + sql """ CREATE TABLE IF NOT EXISTS test_array_agg_complex (id int, kastr array, km map, ks STRUCT) engine=olap + DISTRIBUTED BY HASH(`id`) BUCKETS 4 + properties("replication_num" = "1") """ + streamLoad { + table "test_array_agg_complex" + file "test_array_agg_complex.csv" + time 60000 + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals(112, json.NumberTotalRows) + assertEquals(112, json.NumberLoadedRows) + } + } + + order_qt_sql_array_agg_array """ SELECT id, array_agg(kastr) FROM test_array_agg_complex GROUP BY id ORDER BY id """ + order_qt_sql_array_agg_map """ SELECT id, array_agg(km) FROM test_array_agg_complex GROUP BY id ORDER BY id """ + order_qt_sql_array_agg_struct """ SELECT id, array_agg(ks) FROM test_array_agg_complex GROUP BY id ORDER BY id """ sql "DROP TABLE `test_array_agg`" sql "DROP TABLE `test_array_agg1`" diff --git a/regression-test/suites/query_p0/aggregate/select_random_distributed_tbl.groovy b/regression-test/suites/query_p0/aggregate/select_random_distributed_tbl.groovy index 5c99a0a4aa02de..df4095a127bb43 100644 --- a/regression-test/suites/query_p0/aggregate/select_random_distributed_tbl.groovy +++ b/regression-test/suites/query_p0/aggregate/select_random_distributed_tbl.groovy @@ -50,72 +50,63 @@ suite("select_random_distributed_tbl") { sql """ insert into ${tableName} values(2,"b",5,5,5,avg_state(5),hll_hash(5),bitmap_hash(5),to_quantile_state(5, 2048)) """ sql """ insert into ${tableName} values(2,"b",6,6,6,avg_state(6),hll_hash(6),bitmap_hash(6),to_quantile_state(6, 2048)) """ - for (int i = 0; i < 2; ++i) { - if (i == 0) { - // test legacy planner - sql "set enable_nereids_planner = false;" - } else if (i == 1) { - // test nereids planner - sql "set enable_nereids_planner = true;" - } - def whereStr = "" - for (int j = 0; j < 2; ++j) { - if (j == 1) { - // test with filter - whereStr = "where k1 > 0" - } - def sql1 = "select * except (v_generic) from ${tableName} ${whereStr} order by k1, k2" - qt_sql_1 "${sql1}" + def whereStr = "" + for (int j = 0; j < 2; ++j) { + if (j == 1) { + // test 
with filter + whereStr = "where k1 > 0" + } + def sql1 = "select * except (v_generic) from ${tableName} ${whereStr} order by k1, k2" + qt_sql_1 "${sql1}" - def sql2 = "select k1 ,k2 ,v_sum ,v_max ,v_min ,v_hll ,v_bitmap ,v_quantile_union from ${tableName} ${whereStr} order by k1, k2" - qt_sql_2 "${sql2}" + def sql2 = "select k1 ,k2 ,v_sum ,v_max ,v_min ,v_hll ,v_bitmap ,v_quantile_union from ${tableName} ${whereStr} order by k1, k2" + qt_sql_2 "${sql2}" - def sql3 = "select k1+1, k2, v_sum from ${tableName} ${whereStr} order by k1, k2" - qt_sql_3 "${sql3}" + def sql3 = "select k1+1, k2, v_sum from ${tableName} ${whereStr} order by k1, k2" + qt_sql_3 "${sql3}" - def sql4 = "select k1, k2, v_sum+1 from ${tableName} ${whereStr} order by k1, k2" - qt_sql_4 "${sql4}" + def sql4 = "select k1, k2, v_sum+1 from ${tableName} ${whereStr} order by k1, k2" + qt_sql_4 "${sql4}" - def sql5 = """ select k1, sum(v_sum), max(v_max), min(v_min), avg_merge(v_generic), + def sql5 = """ select k1, sum(v_sum), max(v_max), min(v_min), avg_merge(v_generic), hll_union_agg(v_hll), bitmap_union_count(v_bitmap), quantile_percent(quantile_union(v_quantile_union),0.5) from ${tableName} ${whereStr} group by k1 order by k1 """ - qt_sql_5 "${sql5}" + qt_sql_5 "${sql5}" - def sql6 = "select count(1) from ${tableName} ${whereStr}" - qt_sql_6 "${sql6}" + def sql6 = "select count(1) from ${tableName} ${whereStr}" + qt_sql_6 "${sql6}" - def sql7 = "select count(*) from ${tableName} ${whereStr}" - qt_sql_7 "${sql7}" + def sql7 = "select count(*) from ${tableName} ${whereStr}" + qt_sql_7 "${sql7}" - def sql8 = "select max(k1) from ${tableName} ${whereStr}" - qt_sql_8 "${sql8}" + def sql8 = "select max(k1) from ${tableName} ${whereStr}" + qt_sql_8 "${sql8}" - def sql9 = "select max(v_sum) from ${tableName} ${whereStr}" - qt_sql_9 "${sql9}" + def sql9 = "select max(v_sum) from ${tableName} ${whereStr}" + qt_sql_9 "${sql9}" - def sql10 = "select sum(v_max) from ${tableName} ${whereStr}" - qt_sql_10 "${sql10}" + def sql10 = "select sum(v_max) from ${tableName} ${whereStr}" + qt_sql_10 "${sql10}" - def sql11 = "select sum(v_min) from ${tableName} ${whereStr}" - qt_sql_11 "${sql11}" + def sql11 = "select sum(v_min) from ${tableName} ${whereStr}" + qt_sql_11 "${sql11}" - // test group by value - def sql12 = "select v_min, sum(v_sum) from ${tableName} ${whereStr} group by v_min order by v_min" - qt_sql_12 "${sql12}" + // test group by value + def sql12 = "select v_min, sum(v_sum) from ${tableName} ${whereStr} group by v_min order by v_min" + qt_sql_12 "${sql12}" - def sql13 = "select count(k1) from ${tableName} ${whereStr}" - qt_sql_13 "${sql13}" + def sql13 = "select count(k1) from ${tableName} ${whereStr}" + qt_sql_13 "${sql13}" - def sql14 = "select count(distinct k1) from ${tableName} ${whereStr}" - qt_sql_14 "${sql14}" + def sql14 = "select count(distinct k1) from ${tableName} ${whereStr}" + qt_sql_14 "${sql14}" - def sql15 = "select count(v_sum) from ${tableName} ${whereStr}" - qt_sql_15 "${sql15}" + def sql15 = "select count(v_sum) from ${tableName} ${whereStr}" + qt_sql_15 "${sql15}" - def sql16 = "select count(distinct v_sum) from ${tableName} ${whereStr}" - qt_sql_16 "${sql16}" - } + def sql16 = "select count(distinct v_sum) from ${tableName} ${whereStr}" + qt_sql_16 "${sql16}" } sql "drop table ${tableName};" @@ -138,11 +129,6 @@ suite("select_random_distributed_tbl") { sql """ insert into random_distributed_tbl_test_2 values(1, 999999999999999.99); """ sql """ insert into random_distributed_tbl_test_2 values(3, 
999999999999999.99); """ - sql "set enable_nereids_planner = false;" - qt_sql_17 "select k1 from random_distributed_tbl_test_2 order by k1;" - qt_sql_18 "select distinct k1 from random_distributed_tbl_test_2 order by k1;" - qt_sql_19 "select k2 from random_distributed_tbl_test_2 order by k2;" - sql "set enable_nereids_planner = true;" qt_sql_20 "select k1 from random_distributed_tbl_test_2 order by k1;" qt_sql_21 "select distinct k1 from random_distributed_tbl_test_2 order by k1;" diff --git a/regression-test/suites/query_p0/dual/dual.groovy b/regression-test/suites/query_p0/dual/dual.groovy index eb001305b47e61..964ca49f3ab2c1 100644 --- a/regression-test/suites/query_p0/dual/dual.groovy +++ b/regression-test/suites/query_p0/dual/dual.groovy @@ -73,14 +73,6 @@ suite('dual') { exception "Table [dual] does not exist in database [regression_test_query_p0_dual]" } - // Disable and enable Nereids planner to check behavior differences - sql "set enable_nereids_planner = false" - test { - sql "select 1 from `dual`" - exception "Unknown table 'dual'" - } - sql "set enable_nereids_planner = true" - // Tests for unknown column errors test { sql "select a from dual" diff --git a/regression-test/suites/query_p0/sql_functions/array_functions/test_array_functions_by_literal.groovy b/regression-test/suites/query_p0/sql_functions/array_functions/test_array_functions_by_literal.groovy index afc88acd65128c..344c1935f4431f 100644 --- a/regression-test/suites/query_p0/sql_functions/array_functions/test_array_functions_by_literal.groovy +++ b/regression-test/suites/query_p0/sql_functions/array_functions/test_array_functions_by_literal.groovy @@ -17,7 +17,6 @@ suite("test_array_functions_by_literal") { // array_nested function - sql """ set enable_nereids_planner = false; """ qt_sql "select a from (select array(1, 1, 2, 2, 2, 2) as a) t" // array with decimal and other types @@ -405,41 +404,28 @@ suite("test_array_functions_by_literal") { qt_sql "select array_cum_sum(array(cast (11.9999 as decimalv3(6,4)),cast (22.0001 as decimalv3(6,4))))" // abnormal test - try { + test { sql "select array_intersect([1, 2, 3, 1, 2, 3], '1[3, 2, 5]')" - } catch (Exception ex) { - assert("${ex}".contains("errCode = 2, detailMessage = No matching function with signature: array_intersect")) + exception "Can not find the compatibility function signature: array_intersect" } // array_min/max with nested array for args test { sql "select array_min(array(1,2,3),array(4,5,6));" - check{result, exception, startTime, endTime -> - assertTrue(exception != null) - logger.info(exception.message) - } + exception "" } test { sql "select array_max(array(1,2,3),array(4,5,6));" - check{result, exception, startTime, endTime -> - assertTrue(exception != null) - logger.info(exception.message) - } + exception "" } test { sql "select array_min(array(split_by_string('a,b,c',',')));" - check{result, exception, startTime, endTime -> - assertTrue(exception != null) - logger.info(exception.message) - } + exception "" } test { sql "select array_max(array(split_by_string('a,b,c',',')));" - check{result, exception, startTime, endTime -> - assertTrue(exception != null) - logger.info(exception.message) - } + exception "" } // array_map with string is can be succeed @@ -448,6 +434,6 @@ suite("test_array_functions_by_literal") { // array_apply with string should be failed test { sql """select array_apply(split_by_string("amory,is,better,committing", ","), '!=', '');""" - exception("No matching function") + exception("array_apply does not support type") } } diff 
--git a/regression-test/suites/query_p0/sql_functions/cast_function/test_cast_map_function.groovy b/regression-test/suites/query_p0/sql_functions/cast_function/test_cast_map_function.groovy index d412e0d8f3741d..14fa8ee4b142a1 100644 --- a/regression-test/suites/query_p0/sql_functions/cast_function/test_cast_map_function.groovy +++ b/regression-test/suites/query_p0/sql_functions/cast_function/test_cast_map_function.groovy @@ -16,7 +16,6 @@ // under the License. suite("test_cast_map_function", "query") { - sql """set enable_nereids_planner = false """ def tableName = "tbl_test_cast_map_function" // array functions only supported in vectorized engine diff --git a/regression-test/suites/query_p0/sql_functions/conditional_functions/test_coalesce_new.groovy b/regression-test/suites/query_p0/sql_functions/conditional_functions/test_coalesce_new.groovy index 834dcbd16b558a..44a17d66b65258 100644 --- a/regression-test/suites/query_p0/sql_functions/conditional_functions/test_coalesce_new.groovy +++ b/regression-test/suites/query_p0/sql_functions/conditional_functions/test_coalesce_new.groovy @@ -44,32 +44,10 @@ suite("test_coalesce_new") { insert into test_cls values (1,'Alice','2023-06-01 12:00:00'),(2,'Bob','2023-06-02 12:00:00'),(3,'Carl','2023-05-01 14:00:00') """ - sql """ - SET enable_nereids_planner=false - """ - def result1 = try_sql """ - select dt from test_cls where coalesce (dt, str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' - """ - assertEquals(result1.size(), 2); - def result11 = try_sql """ - select dt from test_cls where coalesce (dt, dt, str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' - """ - assertEquals(result11.size(), 2); - def result12 = try_sql """ - select dt from test_cls where coalesce (dt, str_to_date(concat('202306', '01'), '%Y%m%d'), str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' - """ - assertEquals(result12.size(), 2); - //test enable_date_conversion=true and enable_nereids sql """ admin set frontend config ("enable_date_conversion"="true") """ - sql """ - SET enable_nereids_planner=true - """ - sql """ - SET enable_fallback_to_original_planner=false - """ def result13 = try_sql """ select dt from test_cls where coalesce (dt, str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' """ @@ -123,29 +101,6 @@ suite("test_coalesce_new") { insert into test_cls_dtv2 values (1,'Alice','2023-06-01 12:00:00'),(2,'Bob','2023-06-02 12:00:00'),(3,'Carl','2023-05-01 14:00:00') """ - sql """ - SET enable_nereids_planner=false - """ - def result2 = try_sql """ - select dt from test_cls_dtv2 where coalesce (dt, str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' - """ - assertEquals(result2.size(), 2); - def result21 = try_sql """ - select dt from test_cls_dtv2 where coalesce (dt, dt, str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' - """ - assertEquals(result21.size(), 2); - def result22 = try_sql """ - select dt from test_cls_dtv2 where coalesce (dt, str_to_date(concat('202306', '01'), '%Y%m%d'), str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' - """ - assertEquals(result22.size(), 2); - - //test enable_date_conversion=true and enable_nereids - sql """ - SET enable_nereids_planner=true - """ - sql """ - SET enable_fallback_to_original_planner=false - """ def result23 = try_sql """ select dt from test_cls_dtv2 where coalesce (dt, str_to_date(concat('202306', '01'), '%Y%m%d')) >= '2023-06-01' """ diff --git 
a/regression-test/suites/query_p0/sql_functions/datetime_functions/test_from_iso8601_date.groovy b/regression-test/suites/query_p0/sql_functions/datetime_functions/test_from_iso8601_date.groovy new file mode 100644 index 00000000000000..52c7bb0da34cca --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/datetime_functions/test_from_iso8601_date.groovy @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_from_iso8601_date") { + + def dbName = "test_from_iso8601_date" + sql "DROP DATABASE IF EXISTS ${dbName}" + sql "CREATE DATABASE ${dbName}" + sql "USE $dbName" + + + qt_test_31 """SELECT from_iso8601_date('0000');""" // 0000-01-01 + qt_test_32 """SELECT from_iso8601_date('0001');""" // 0001-01-01 + qt_test_33 """SELECT from_iso8601_date('1900');""" // 1900-01-01 + qt_test_34 """SELECT from_iso8601_date('1970');""" // 1970-01-01 + qt_test_35 """SELECT from_iso8601_date('9999');""" // 9999-01-01 + + qt_test_36 """SELECT from_iso8601_date('0000-01-01');""" // 0000-01-01 + qt_test_37 """SELECT from_iso8601_date('0000-02-28');""" // 0000-02-28 + qt_test_38 """SELECT from_iso8601_date('0001-02-28');""" // 0001-02-28 + qt_test_39 """SELECT from_iso8601_date('1900-02-28');""" // 1900-02-28 + qt_test_40 """SELECT from_iso8601_date('1970-01-01');""" // 1970-01-01 + qt_test_41 """SELECT from_iso8601_date('9999-12-31');""" // 9999-12-31 + + qt_test_42 """SELECT from_iso8601_date('00000228');""" // 0000-02-28 + qt_test_43 """SELECT from_iso8601_date('00010228');""" // 0001-02-28 + qt_test_44 """SELECT from_iso8601_date('19000228');""" // 1900-02-28 + qt_test_45 """SELECT from_iso8601_date('19700101');""" // 1970-01-01 + qt_test_46 """SELECT from_iso8601_date('99991231');""" // 9999-12-31 + + qt_test_47 """SELECT from_iso8601_date('0000-01');""" // 0000-01-01 + qt_test_48 """SELECT from_iso8601_date('0000-02');""" // 0000-02-01 + qt_test_49 """SELECT from_iso8601_date('0001-03');""" // 0001-03-01 + qt_test_50 """SELECT from_iso8601_date('1900-03');""" // 1900-03-01 + qt_test_51 """SELECT from_iso8601_date('1970-01');""" // 1970-01-01 + qt_test_52 """SELECT from_iso8601_date('9999-12');""" // 9999-12-01 + + qt_test_53 """SELECT from_iso8601_date('0000-W01');""" // 0000-01-03 + qt_test_54 """SELECT from_iso8601_date('0000-W09');""" // 0000-02-28 + qt_test_55 """SELECT from_iso8601_date('0001-W09');""" // 0001-02-26 + qt_test_56 """SELECT from_iso8601_date('1900-W08');""" // 1900-02-19 + qt_test_57 """SELECT from_iso8601_date('1970-W01');""" // 1969-12-29 + qt_test_58 """SELECT from_iso8601_date('9999-W52');""" // 9999-12-27 + + + qt_test_59 """SELECT from_iso8601_date('0000-W01-1');""" // 0000-01-03 + qt_test_60 """SELECT from_iso8601_date('0000-W09-6');""" // 0000-03-04 0000-03-05 + qt_test_61 """SELECT 
from_iso8601_date('0001-W09-6');""" // 0001-03-03 + qt_test_62 """SELECT from_iso8601_date('1900-W08-7');""" // 1900-02-25 + qt_test_63 """SELECT from_iso8601_date('1970-W01-1');""" // 1969-12-29 + qt_test_64 """SELECT from_iso8601_date('9999-W52-5');""" // 9999-12-31 + + + + + qt_test_65 """SELECT from_iso8601_date('0000-059');""" // 0000-02-28 + qt_test_66 """SELECT from_iso8601_date('0001-060');""" // 0001-03-01 + qt_test_67 """SELECT from_iso8601_date('1900-059');""" // 1900-02-28 + qt_test_68 """SELECT from_iso8601_date('1970-001');""" // 1970-01-01 + qt_test_69 """SELECT from_iso8601_date('9999-365');""" // 9999-12-31 + + qt_test_70 """SELECT from_iso8601_date('0000-060');""" // 0000-02-29 0000-03-01 + qt_test_71 """SELECT from_iso8601_date('0000-061');""" // 0000-03-01 0000-03-02 + qt_test_72 """SELECT from_iso8601_date('0000-062');""" // 0000-03-02 0000-03-03 + + qt_test_73 """SELECT from_iso8601_date('0000-02-29');""" // 0000-02-29 NULL + qt_test_74 """SELECT from_iso8601_date('0000-03-01');""" // 0000-03-01 + qt_test_75 """SELECT from_iso8601_date('0001-02-29');""" // NULL + qt_test_76 """SELECT from_iso8601_date('0001-03-01');""" // 0001-03-01 + + qt_test_77 """SELECT from_iso8601_date('1900-02-29');""" // NULL + qt_test_78 """SELECT from_iso8601_date('1900-03-01');""" // 1900-03-01 + qt_test_79 """SELECT from_iso8601_date('1970-02-28');""" // 1970-02-28 + qt_test_80 """SELECT from_iso8601_date('1970-03-01');""" // 1970-03-01 + qt_test_81 """SELECT from_iso8601_date('9999-02-29');""" // NULL + qt_test_82 """SELECT from_iso8601_date('9999-03-01');""" // 9999-03-01 + + qt_test_83 """SELECT from_iso8601_date('2009-W01-1');""" // 2008-12-29 + qt_test_84 """SELECT from_iso8601_date('2009-W53-7')""" // 2010-01-03 + + qt_test_85 """SELECT from_iso8601_date(NULL);""" + qt_test_86 """SELECT from_iso8601_date(nullable("2023-04-05"));""" + + + qt_test_101 """ SELECT from_iso8601_date("20230");""" + qt_test_102 """ SELECT from_iso8601_date("0230");""" + qt_test_103 """ SELECT from_iso8601_date("202334");""" + qt_test_104 """ SELECT from_iso8601_date("902030");""" + qt_test_105 """ SELECT from_iso8601_date("2003--33");""" + qt_test_106 """ SELECT from_iso8601_date("abcdd");""" + qt_test_107 """ SELECT from_iso8601_date("7855462");""" + qt_test_108 """ SELECT from_iso8601_date("010-03-02");""" + qt_test_109 """ SELECT from_iso8601_date("2021/03/04");""" + qt_test_110 """ SELECT from_iso8601_date("2121W1");""" + qt_test_111 """ SELECT from_iso8601_date("2121W00");""" + qt_test_112 """ SELECT from_iso8601_date("ssss");""" + qt_test_113 """ SELECT from_iso8601_date("5555555");""" + qt_test_114 """ SELECT from_iso8601_date("555500");""" + qt_test_115 """ SELECT from_iso8601_date("5555001");""" + qt_test_116 """ SELECT from_iso8601_date("5555W001");""" + qt_test_116 """ SELECT from_iso8601_date("5555-001");""" + qt_test_117 """ SELECT from_iso8601_date("5555-W001");""" + qt_test_118 """ SELECT from_iso8601_date("555-001");""" + qt_test_119 """ SELECT from_iso8601_date("99999-02-01");""" + qt_test_120 """ SELECT from_iso8601_date("");""" + + + + sql """ + CREATE TABLE IF NOT EXISTS `tb2` ( + `k0` int null comment "", + + `k1` string, + `k2` char(10), + `k3` varchar(10), + + `k11` string not null , + `k22` char(10) not null , + `k33` varchar(10) not null + + ) engine=olap + DISTRIBUTED BY HASH(`k0`) BUCKETS 5 properties("replication_num" = "1") + """ + sql """insert into tb2 values (1, "2023-02-03","2023-02-03","2023-02-03" , "2023-02-03","2023-02-03","2023-02-03" );""" + sql """insert into tb2 
values (2, null,null,null, "2023-02-03","2023-02-03","2023-02-03" );""" + + qt_test_87 """ select from_iso8601_date(k1),from_iso8601_date(k2),from_iso8601_date(k3),from_iso8601_date(k11),from_iso8601_date(k22),from_iso8601_date(k33) from tb2 order by k0;""" + qt_test_88 """ select from_iso8601_date(nullable(k1)),from_iso8601_date(k2),from_iso8601_date(k3),from_iso8601_date(nullable(k11)),from_iso8601_date(k22),from_iso8601_date(k33) from tb2 order by k0; """ + qt_test_89 """ select from_iso8601_date(NULL) from tb2 order by k0; """ + + + + sql """ drop table tb2 """ + +} \ No newline at end of file diff --git a/regression-test/suites/query_p0/sql_functions/datetime_functions/test_to_iso8601.groovy b/regression-test/suites/query_p0/sql_functions/datetime_functions/test_to_iso8601.groovy new file mode 100644 index 00000000000000..0fc795e348e800 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/datetime_functions/test_to_iso8601.groovy @@ -0,0 +1,148 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_to_iso8601") { + + def dbName = "test_iso8601" + sql "DROP DATABASE IF EXISTS ${dbName}" + sql "CREATE DATABASE ${dbName}" + sql "USE $dbName" + + sql """ + CREATE TABLE IF NOT EXISTS `tb` ( + `id` int null comment "", + + `k1` date null comment "", + `k2` DATETIME null comment "", + `k3` DATETIME(1) null comment "", + `k4` DATETIME(2) null comment "", + `k5` DATETIME(3) null comment "", + `k6` DATETIME(4) null comment "", + `k7` DATETIME(6) null comment "", + + `k11` date not null comment "", + `k22` DATETIME not null comment "", + `k33` DATETIME(1) not null comment "", + `k44` DATETIME(2) not null comment "", + `k55` DATETIME(3) not null comment "", + `k66` DATETIME(4) not null comment "", + `k77` DATETIME(6) not null comment "" + ) engine=olap + DISTRIBUTED BY HASH(`k1`) BUCKETS 5 properties("replication_num" = "1") + """ + + sql """ insert into tb values (1, cast( '2023-04-05' as date ), + cast( '2023-04-05 03:04:05' as DATETIME), cast( '2023-04-05 03:04:05' as DATETIME(1) ),cast( '2023-04-05 03:04:05' as DATETIME(2) ),cast( '2023-04-05 03:04:05' as DATETIME(3) ), + cast( '2023-04-05 03:04:05' as DATETIME(4) ),cast( '2023-04-05 03:04:05' as DATETIME(6) ), + cast( '2023-04-05' as date ), + cast( '2023-04-05 03:04:05' as DATETIME), cast( '2023-04-05 03:04:05' as DATETIME(1) ),cast( '2023-04-05 03:04:05' as DATETIME(2) ),cast( '2023-04-05 03:04:05' as DATETIME(3) ), + cast( '2023-04-05 03:04:05' as DATETIME(4) ),cast( '2023-04-05 03:04:05' as DATETIME(6) ) + ); + """ + + sql """ + insert into tb values (2,cast( '2023-04-05' as date ),cast( '2023-04-05 03:04:05' as DATETIME ),cast( '2023-04-05 03:04:05.1' as DATETIME(1) ),cast( '2023-04-05 03:04:05.12' as DATETIME(2) ), + cast( '2023-04-05 03:04:05.123' as DATETIME(3) ),cast( '2023-04-05 03:04:05.1234' as DATETIME(4) ),cast( '2023-04-05 03:04:05.123456' as DATETIME(6) ),cast( '2023-04-05' as date ),cast( '2023-04-05 03:04:05' as DATETIME ), + cast( '2023-04-05 03:04:05.1' as DATETIME(1) ),cast( '2023-04-05 03:04:05.12' as DATETIME(2) ),cast( '2023-04-05 03:04:05.123' as DATETIME(3) ),cast( '2023-04-05 03:04:05.1234' as DATETIME(4) ), + cast( '2023-04-05 03:04:05.123456' as DATETIME(6) ) + ); """ + + + sql """ + insert into tb values (3,cast( '2023-04-05' as date ),cast( '2023-04-05 03:04:05' as DATETIME ), + cast( '2023-04-05 03:04:05.1' as DATETIME(1) ),cast( '2023-04-05 03:04:05.1' as DATETIME(2) ),cast( '2023-04-05 03:04:05.1' as DATETIME(3) ), + cast( '2023-04-05 03:04:05.1' as DATETIME(4) ),cast( '2023-04-05 03:04:05.1' as DATETIME(6) ),cast( '2023-04-05' as date ),cast( '2023-04-05 03:04:05' as DATETIME ), + cast( '2023-04-05 03:04:05.1' as DATETIME(1) ),cast( '2023-04-05 03:04:05.1' as DATETIME(2) ),cast( '2023-04-05 03:04:05.1' as DATETIME(3) ), + cast( '2023-04-05 03:04:05.1' as DATETIME(4) ),cast( '2023-04-05 03:04:05.1' as DATETIME(6) ) + );""" + + sql """ + insert into tb values (4,CAST('0000-01-03' AS DATE),CAST('0000-01-03 00:00:00' AS DATETIME),CAST('0000-01-03 00:00:00' AS DATETIME(1)),CAST('0000-01-03 00:00:00' AS DATETIME(2)), + CAST('0000-01-03 00:00:00' AS DATETIME(3)),CAST('0000-01-03 00:00:00' AS DATETIME(4)),CAST('0000-01-03 00:00:00' AS DATETIME(6)),CAST('0000-01-03' AS DATE),CAST('0000-01-03 00:00:00' AS DATETIME), + CAST('0000-01-03 00:00:00' AS DATETIME(1)),CAST('0000-01-03 00:00:00' AS DATETIME(2)),CAST('0000-01-03 00:00:00' AS DATETIME(3)),CAST('0000-01-03 00:00:00' AS DATETIME(4)),CAST('0000-01-03 00:00:00' AS DATETIME(6)) + );""" + + sql """ + insert into tb values (5,CAST('9999-12-31' AS 
DATE),CAST('9999-12-31 23:59:59' AS DATETIME),CAST('9999-12-31 23:59:59.9' AS DATETIME(1)), + CAST('9999-12-31 23:59:59.99' AS DATETIME(2)),CAST('9999-12-31 23:59:59.999' AS DATETIME(3)),CAST('9999-12-31 23:59:59.9999' AS DATETIME(4)), + CAST('9999-12-31 23:59:59.999999' AS DATETIME(6)),CAST('9999-12-31' AS DATE),CAST('9999-12-31 23:59:59' AS DATETIME),CAST('9999-12-31 23:59:59.9' AS DATETIME(1)), + CAST('9999-12-31 23:59:59.99' AS DATETIME(2)),CAST('9999-12-31 23:59:59.999' AS DATETIME(3)),CAST('9999-12-31 23:59:59.9999' AS DATETIME(4)),CAST('9999-12-31 23:59:59.999999' AS DATETIME(6)) + ); """ + + sql """ + insert into tb values (6,NULL,NULL,NULL,NULL,NULL,NULL,NULL,CAST('9999-12-31' AS DATE),CAST('9999-12-31 23:59:59' AS DATETIME),CAST('9999-12-31 23:59:59.9' AS DATETIME(1)), + CAST('9999-12-31 23:59:59.99' AS DATETIME(2)),CAST('9999-12-31 23:59:59.999' AS DATETIME(3)),CAST('9999-12-31 23:59:59.9999' AS DATETIME(4)),CAST('9999-12-31 23:59:59.999999' AS DATETIME(6)) + ); + """ + + + qt_test_1 """select to_iso8601(k1) from tb order by id;""" + qt_test_2 """select to_iso8601(k2) from tb order by id;""" + qt_test_3 """select to_iso8601(k3) from tb order by id;""" + qt_test_4 """select to_iso8601(k4) from tb order by id;""" + qt_test_5 """select to_iso8601(k5) from tb order by id;""" + qt_test_6 """select to_iso8601(k6) from tb order by id;""" + qt_test_7 """select to_iso8601(k7) from tb order by id;""" + + qt_test_8 """select to_iso8601(k11) from tb order by id;""" + qt_test_9 """select to_iso8601(k22) from tb order by id;""" + qt_test_10 """select to_iso8601(k33) from tb order by id;""" + qt_test_11 """select to_iso8601(k44) from tb order by id;""" + qt_test_12 """select to_iso8601(k55) from tb order by id;""" + qt_test_13 """select to_iso8601(k66) from tb order by id;""" + qt_test_14 """select to_iso8601(k77) from tb order by id;""" + + qt_test_7_2 """select to_iso8601(nullable(k7)) from tb order by id;""" + qt_test_14_2 """select to_iso8601(nullable(k77)) from tb order by id;""" + qt_test_14_2 """select to_iso8601(NULL) from tb order by id;""" + + + + sql """ drop table tb """ + + + + qt_test_15 """SELECT to_iso8601(CAST('2023-01-03' AS DATE));""" + qt_test_16 """SELECT to_iso8601(CAST('2023-01-03 00:00:00' AS DATETIME));""" + + qt_test_17 """SELECT to_iso8601(CAST('0000-01-03' AS DATE));""" + qt_test_18 """SELECT to_iso8601(CAST('0000-01-03 00:00:00' AS DATETIME));""" + + qt_test_19 """SELECT to_iso8601(CAST('0000-12-31' AS DATE));""" + qt_test_20 """SELECT to_iso8601(CAST('0000-12-31 23:59:59' AS DATETIME));""" + + qt_test_21 """SELECT to_iso8601(CAST('0000-02-28' AS DATE));""" + qt_test_22 """SELECT to_iso8601(CAST('0000-02-28 00:00:00' AS DATETIME));""" + + qt_test_23 """SELECT to_iso8601(CAST('0000-02-29' AS DATE));""" + qt_test_24 """SELECT to_iso8601(CAST('0000-02-29 00:00:00' AS DATETIME));""" + + qt_test_25 """SELECT to_iso8601(CAST('1900-02-28' AS DATE));""" + qt_test_26 """SELECT to_iso8601(CAST('1900-02-28 00:00:00' AS DATETIME));""" + + qt_test_27 """SELECT to_iso8601(CAST('9999-12-31' AS DATE));""" + qt_test_28 """SELECT to_iso8601(CAST('9999-12-31 23:59:59' AS DATETIME));""" + + qt_test_29 """SELECT to_iso8601(CAST('1970-01-01' AS DATE));""" + qt_test_30 """SELECT to_iso8601(CAST('1970-01-01 00:00:00' AS DATETIME));""" + + qt_test_31 """ SELECT to_iso8601(nullable(CAST('1970-01-01' AS DATE))); """ + qt_test_32 """ SELECT to_iso8601(nullable(NULL)); """ + qt_test_33 """ SELECT to_iso8601(NULL); """ + + +} \ No newline at end of file diff --git 
a/regression-test/suites/query_p0/sql_functions/encryption_digest/test_encryption_function.groovy b/regression-test/suites/query_p0/sql_functions/encryption_digest/test_encryption_function.groovy index 25095f46917c96..69933b6fd87db9 100644 --- a/regression-test/suites/query_p0/sql_functions/encryption_digest/test_encryption_function.groovy +++ b/regression-test/suites/query_p0/sql_functions/encryption_digest/test_encryption_function.groovy @@ -226,4 +226,7 @@ suite("test_encryption_function") { qt_sql54 """ select aes_decrypt(aes_encrypt(k,k1,k2, "AES_256_CFB"),k1,k2, "AES_256_CFB") from quantile_table2; """ qt_sql55 """ select aes_decrypt(aes_encrypt("zhang",k1,k2, "AES_256_CFB"),k1,k2, "AES_256_CFB") from quantile_table2; """ qt_sql56 """ select aes_decrypt(aes_encrypt("zhang",k1,k2, "AES_256_CFB"),k1,k2, "AES_256_CFB") from quantile_table2; """ + + //four arg (column/const) with wrong mode + qt_sql57 """ select sm4_decrypt(sm4_encrypt(k,"doris","abcdefghij", "SM4_128_CBC"),"doris","abcdefghij","SM4_555_CBC") from quantile_table2; """ } diff --git a/regression-test/suites/query_p0/sql_functions/json_functions/json_search.groovy b/regression-test/suites/query_p0/sql_functions/json_functions/json_search.groovy new file mode 100644 index 00000000000000..0872758cd12fa0 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/json_functions/json_search.groovy @@ -0,0 +1,121 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_json_search") { + def dbName = "test_json_search_db" + List> db = sql """show databases like '${dbName}'""" + if (db.size() == 0) { + sql """CREATE DATABASE ${dbName}""" + } + sql """use ${dbName}""" + + def testTable = "test_json_search" + + sql """ + CREATE TABLE `${testTable}` ( + `id` int NULL, + `j` varchar(1000) NULL, + `jb` json NULL, + `o` varchar(1000) NULL, + `p` varchar(1000) NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + def jsonValue = """'["A",[{"B":"1"}],{"C":"AB"},{"D":"BC"}]'""" + + sql """insert into ${testTable} values(1, $jsonValue, $jsonValue, NULL, '_%')""" + sql """insert into ${testTable} values(2, $jsonValue, $jsonValue, 'one', '_%')""" + sql """insert into ${testTable} values(3, $jsonValue, $jsonValue, 'One', '_%')""" + sql """insert into ${testTable} values(4, $jsonValue, $jsonValue, 'all', '_%')""" + sql """insert into ${testTable} values(5, $jsonValue, $jsonValue, 'All', '_%')""" + sql """insert into ${testTable} values(6, $jsonValue, $jsonValue, 'invalid_one_or_all', '_%')""" + sql """insert into ${testTable} values(7, NULL, NULL, 'one', '_%')""" + sql """insert into ${testTable} values(8, $jsonValue, $jsonValue, 'all', NULL)""" + sql """insert into ${testTable} values(9, $jsonValue, $jsonValue, 'all', 'X')""" + + qt_one_is_valid_or_null """ SELECT id, j, o, p, JSON_SEARCH(j, o, p), JSON_SEARCH(jb, o, p) + FROM ${testTable} WHERE o <> 'invalid_one_or_all' ORDER BY id;""" + test { + sql """SELECT id, j, o, p, JSON_SEARCH(j, o, p), JSON_SEARCH(jb, o, p) + FROM ${testTable} WHERE o = 'invalid_one_or_all' ORDER BY id;""" + exception "[INVALID_ARGUMENT]the one_or_all argument invalid_one_or_all is not 'one' not 'all'" + } + + qt_all_const1 """ SELECT JSON_SEARCH($jsonValue, 'one', '__')""" + qt_all_const1 """ SELECT JSON_SEARCH($jsonValue, 'One', '__')""" + qt_all_const2 """ SELECT JSON_SEARCH($jsonValue, 'all', '__')""" + qt_all_const2 """ SELECT JSON_SEARCH($jsonValue, 'All', '__')""" + qt_all_const3 """ SELECT JSON_SEARCH($jsonValue, 'one', 'A')""" + qt_all_const4 """ SELECT JSON_SEARCH($jsonValue, 'all', 'A')""" + qt_all_const5 """ SELECT JSON_SEARCH($jsonValue, 'one', 'A%')""" + qt_all_const6 """ SELECT JSON_SEARCH($jsonValue, 'one', 'A_')""" + qt_all_const7 """ SELECT JSON_SEARCH($jsonValue, 'one', 'X')""" + qt_all_const8 """ SELECT JSON_SEARCH($jsonValue, 'all', 'X')""" + + qt_one_is_one_const """ SELECT id, j, 'one', p, JSON_SEARCH(j, 'one', p), JSON_SEARCH(jb, 'one', p) + FROM ${testTable} ORDER BY id; """ + qt_one_is_all_const """ SELECT id, j, 'all', p, JSON_SEARCH(j, 'all', p), JSON_SEARCH(jb, 'all', p) + FROM ${testTable} ORDER BY id; """ + test { + sql """SELECT id, JSON_SEARCH(j, 'invalid_one_or_all', p), JSON_SEARCH(jb, 'invalid_one_or_all', p) + FROM ${testTable} ORDER BY id;""" + exception "[INVALID_ARGUMENT]the one_or_all argument invalid_one_or_all is not 'one' not 'all'" + } + + test { + sql """SELECT id, JSON_SEARCH(j, o, 'A'), JSON_SEARCH(jb, o, 'A') + FROM ${testTable} WHERE o = 'invalid_one_or_all' ORDER BY id;""" + exception "[INVALID_ARGUMENT]the one_or_all argument invalid_one_or_all is not 'one' not 'all'" + } + + test { + sql """SELECT id, j, o, p, JSON_SEARCH(j, o, NULL), JSON_SEARCH(jb, o, NULL) + FROM ${testTable} WHERE o = 'invalid_one_or_all' ORDER BY id;""" + exception "[INVALID_ARGUMENT]the one_or_all argument invalid_one_or_all is not 'one' not 'all'" + } + + qt_one_and_pattern_is_const1 """ 
SELECT id, j, 'one', 'A', JSON_SEARCH(j, 'one', 'A'), JSON_SEARCH(jb, 'one', 'A') + FROM ${testTable} ORDER BY id; """ + qt_one_and_pattern_is_const2 """ SELECT id, j, 'all', 'A', JSON_SEARCH(j, 'all', 'A'), JSON_SEARCH(jb, 'all', 'A') + FROM ${testTable} ORDER BY id; """ + + qt_one_and_pattern_is_nullconst """ SELECT id, j, NULL, NULL, JSON_SEARCH(j, NULL, NULL), JSON_SEARCH(jb, NULL, NULL) + FROM ${testTable} ORDER BY id; """ + + test { + sql """ SELECT id, $jsonValue, o, p, JSON_SEARCH($jsonValue, o, p) FROM ${testTable} + WHERE o = 'invalid_one_or_all' ORDER BY id;""" + exception "[INVALID_ARGUMENT]the one_or_all argument invalid_one_or_all is not 'one' not 'all'" + } + qt_json_const1 """ SELECT id, $jsonValue, 'one', p, JSON_SEARCH($jsonValue, 'one', p) FROM ${testTable} ORDER BY id; """ + qt_json_const2 """ SELECT id, $jsonValue, 'all', p, JSON_SEARCH($jsonValue, 'all', p) FROM ${testTable} ORDER BY id; """ + + test { + sql """ SELECT id, JSON_SEARCH($jsonValue, o, 'A') FROM ${testTable} + WHERE o = 'invalid_one_or_all' ORDER BY id;""" + exception "[INVALID_ARGUMENT]the one_or_all argument invalid_one_or_all is not 'one' not 'all'" + } + + qt_one_case1 """ SELECT id, $jsonValue, 'One', p, JSON_SEARCH($jsonValue, 'One', p) FROM ${testTable} ORDER BY id; """ + qt_one_case2 """ SELECT id, $jsonValue, 'All', p, JSON_SEARCH($jsonValue, 'One', p) FROM ${testTable} ORDER BY id; """ + + sql "drop table ${testTable}" +} diff --git a/regression-test/suites/query_p0/sql_functions/json_functions/test_mapagg_with_jsonfuncs.groovy b/regression-test/suites/query_p0/sql_functions/json_functions/test_mapagg_with_jsonfuncs.groovy index 24bfdcc413a770..d433787097ae31 100644 --- a/regression-test/suites/query_p0/sql_functions/json_functions/test_mapagg_with_jsonfuncs.groovy +++ b/regression-test/suites/query_p0/sql_functions/json_functions/test_mapagg_with_jsonfuncs.groovy @@ -22,7 +22,4 @@ suite("test_mapagg_with_jsonfuncs") { sql """ create table t003 (a bigint, b json not null) properties ("replication_num"="1"); """ sql """ insert into t003 values (1, '{"a":1,"b":2}'); """ qt_sql """ select a, map_agg("k1", json_quote(b)) from t003 group by a; """ - - sql "set enable_nereids_planner = false" - qt_sql """ select a, map_agg("k1", json_quote(b)) from t003 group by a; """ } diff --git a/regression-test/suites/query_p0/sql_functions/math_functions/test_normal_cdf.groovy b/regression-test/suites/query_p0/sql_functions/math_functions/test_normal_cdf.groovy new file mode 100644 index 00000000000000..8fcd7b0d23d7cd --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/math_functions/test_normal_cdf.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_normal_cdf") { + def dbName = "test_normal_cdf" + sql "DROP DATABASE IF EXISTS ${dbName}" + sql "CREATE DATABASE ${dbName}" + sql "USE $dbName" + + + + sql """DROP TABLE IF EXISTS `tb`""" + sql """ CREATE TABLE `tb` ( + `id` int , + + `k1` double , + `k2` double , + `k3` double , + + `k11` double not NULL, + `k22` double not NULL, + `k33` double not NULL + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 5 properties("replication_num" = "1"); + """ + + + + sql """ insert into `tb` values( 1, 0, 1, 1.96, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 2, 10, 9, 10, 10, 9, 10 ) """ // 0.5 + sql """ insert into `tb` values( 3, -1.5, 2.1, -7.8, -1.5, 2.1, -7.8) """ // 0.0013498980316301035 + + sql """ insert into `tb` values( 4, 0 , 0 , 1, 0 , 0 , 1 ) """ // NULL + sql """ insert into `tb` values( 5, 0 , -1 , 1, 0 , -1 , 1 ) """ // NULL + + + sql """ insert into `tb` values( 6, NULL, NULL, NULL, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 7, 0, NULL, NULL, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 8, 0, 1 , NULL, 0, 1, 1.96 ) """ // 0.9750021048517796 + + + sql """ insert into `tb` values( 9, 0, NULL, 1.96, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 10, 0, NULL, NULL, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 11, 0, NULL , 1.96, 0, 1, 1.96 ) """ // 0.9750021048517796 + + + sql """ insert into `tb` values( 12, NULL, 1, 1.96, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 13, NULL, 1, NULL, 0, 1, 1.96 ) """ // 0.9750021048517796 + sql """ insert into `tb` values( 14, NULL, 1 , 1.96, 0, 1, 1.96 ) """ // 0.9750021048517796 + + + qt_test_1 """ select normal_cdf(k1,k2,k3),normal_cdf(k11,k22,k33) from tb order by id """ + + qt_test_2 """ select normal_cdf(k1,k2,1.96),normal_cdf(k11,k22,1.96) from tb order by id """ + qt_test_3 """ select normal_cdf(0,k2,k3),normal_cdf(0,k22,k33) from tb order by id """ + qt_test_4 """ select normal_cdf(k1,1,k3),normal_cdf(k11,1,k33) from tb order by id """ + + qt_test_5 """ select normal_cdf(0,1,k3),normal_cdf(0,1,k33) from tb order by id """ + qt_test_6 """ select normal_cdf(k1,1,1.96),normal_cdf(k11,1,1.96) from tb order by id """ + qt_test_7 """ select normal_cdf(0,k2,1.96),normal_cdf(0,k2,1.96) from tb order by id """ + + qt_test_8 """ select normal_cdf(k1,k2,NULL),normal_cdf(k11,k22,NULL) from tb order by id """ + qt_test_9 """ select normal_cdf(NULL,k2,k3),normal_cdf(NULL,k22,k33) from tb order by id """ + qt_test_10 """ select normal_cdf(k1,NULL,k3),normal_cdf(k1,NULL,k33) from tb order by id """ + + qt_test_11 """ select normal_cdf(nullable(k1),NULL,k3),normal_cdf(nullable(0),NULL,k33) from tb order by id """ + + qt_test_12 """ select id,k1,k2,normal_cdf(0,1,1.96),normal_cdf(k1,k2,1.96),normal_cdf(k11,k22,1.96) from tb where id =1 ;""" + + qt_test_13 """ select normal_cdf( 0, 1, 1.96 ) ; """ + qt_test_14 """ select normal_cdf( nullable(0), 1, 1.96 ) ; """ + qt_test_15 """ select normal_cdf( nullable(0), nullable(1), 1.96 ) ; """ + qt_test_16 """ select normal_cdf( nullable(0), NULL , 1.96 ) ; """ + qt_test_17 """ select normal_cdf( nullable(0), NULL , 1.96 ) ; """ + qt_test_18 """ select normal_cdf( 0, 1 , NULL ) ; """ + qt_test_19 """ select normal_cdf( 0, -1,1 ) ; """ + + +} diff --git a/regression-test/suites/query_p0/sql_functions/size_funciton/test_size_function.groovy b/regression-test/suites/query_p0/sql_functions/size_funciton/test_size_function.groovy index 
93f1b58158746c..fb897692790e34 100644 --- a/regression-test/suites/query_p0/sql_functions/size_funciton/test_size_function.groovy +++ b/regression-test/suites/query_p0/sql_functions/size_funciton/test_size_function.groovy @@ -16,7 +16,6 @@ // under the License. suite("test_size_function") { - sql """ set enable_nereids_planner = false; """ // literal qt_sql "SELECT size(array_shuffle(['aaa', null, 'bbb', 'fff'])), array_shuffle(['aaa', null, 'bbb', 'fff'], 0), shuffle(['aaa', null, 'bbb', 'fff'], 0)" diff --git a/regression-test/suites/query_p0/sql_functions/string_functions/test_split_by_string.groovy b/regression-test/suites/query_p0/sql_functions/string_functions/test_split_by_string.groovy index 2ec70e361242ce..6b7f32b5aae1b1 100644 --- a/regression-test/suites/query_p0/sql_functions/string_functions/test_split_by_string.groovy +++ b/regression-test/suites/query_p0/sql_functions/string_functions/test_split_by_string.groovy @@ -18,6 +18,7 @@ suite("test_split_by_string") { // split by char qt_sql "select split_by_string('abcde','');" + qt_sql "select split_by_string('你a好b世c界','');" qt_sql "select split_by_string('12553','');" qt_sql "select split_by_string('','');" qt_sql "select split_by_string('',',');" @@ -70,6 +71,7 @@ suite("test_split_by_string") { sql """ INSERT INTO ${tableName1} VALUES(9, 'a,b,c,', ',') """ sql """ INSERT INTO ${tableName1} VALUES(10, null, ',') """ sql """ INSERT INTO ${tableName1} VALUES(11, 'a,b,c,12345,', ',') """ + sql """ INSERT INTO ${tableName1} VALUES(12, '你a好b世c界', '') """ qt_sql "SELECT *, split_by_string(v1, v2) FROM ${tableName1} ORDER BY k1" diff --git a/regression-test/suites/query_p0/sql_functions/string_functions/test_string_function.groovy b/regression-test/suites/query_p0/sql_functions/string_functions/test_string_function.groovy index b71d339a5387a6..6e18fb57eeb4cf 100644 --- a/regression-test/suites/query_p0/sql_functions/string_functions/test_string_function.groovy +++ b/regression-test/suites/query_p0/sql_functions/string_functions/test_string_function.groovy @@ -228,6 +228,16 @@ suite("test_string_function", "arrow_flight_sql") { qt_sql "select substring('abcdef',3,-1);" qt_sql "select substring('abcdef',-3,-1);" qt_sql "select substring('abcdef',10,1);" + sql """ set debug_skip_fold_constant = true;""" + qt_substring_utf8_sql "select substring('中文测试',5);" + qt_substring_utf8_sql "select substring('中文测试',4);" + qt_substring_utf8_sql "select substring('中文测试',2,2);" + qt_substring_utf8_sql "select substring('中文测试',-1,2);" + sql """ set debug_skip_fold_constant = false;""" + qt_substring_utf8_sql "select substring('中文测试',5);" + qt_substring_utf8_sql "select substring('中文测试',4);" + qt_substring_utf8_sql "select substring('中文测试',2,2);" + qt_substring_utf8_sql "select substring('中文测试',-1,2);" sql """ drop table if exists test_string_function; """ sql """ create table test_string_function ( diff --git a/regression-test/suites/query_p0/sql_functions/string_functions/test_translate.groovy b/regression-test/suites/query_p0/sql_functions/string_functions/test_translate.groovy new file mode 100644 index 00000000000000..e63f42ae5b4cf5 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/string_functions/test_translate.groovy @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_translate") { + // this table has nothing todo. just make it eaiser to generate query + sql " drop table if exists hits_three_args " + sql """ create table hits_three_args( + nothing boolean + ) + properties("replication_num" = "1"); + """ + sql "insert into hits_three_args values(true);" + + sql " drop table if exists test_translate" + sql """ + create table test_translate ( + k0 int, + a varchar not null, + b varchar null, + ) + DISTRIBUTED BY HASH(k0) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + order_qt_empty_nullable "select translate(a, a, a) from test_translate" + order_qt_empty_not_nullable "select translate(b, b, b) from test_translate" + order_qt_empty_partial_nullable "select translate(a, b, b) from test_translate" + + sql """ insert into test_translate values (1, "", ""), (2, "中文", "中文"), (3, "123123", "123123"), + (4, "\\\\a\\\\b\\\\c\\\\d", "\\\\a\\\\b\\\\c\\\\d"), + (5, "!@#@#\$#^\$%%\$^", "!@#@#\$#^\$%%\$^"), (6, " ", " "), + (7, "", NULL); + """ + + order_qt_nullable """ + SELECT translate(t.test_translate, t.ARG2, t.ARG3) as result + FROM ( + SELECT hits_three_args.nothing, TABLE1.test_translate, TABLE1.order1, TABLE2.ARG2, TABLE2.order2, TABLE3.ARG3, TABLE3.order3 + FROM hits_three_args + CROSS JOIN ( + SELECT b as test_translate, k0 as order1 + FROM test_translate + ) as TABLE1 + CROSS JOIN ( + SELECT b as ARG2, k0 as order2 + FROM test_translate + ) as TABLE2 + CROSS JOIN ( + SELECT b as ARG3, k0 as order3 + FROM test_translate + ) as TABLE3 + )t; + """ + + /// nullables + order_qt_not_nullable "select translate(a, a, a) from test_translate" + order_qt_partial_nullable "select translate(a, b, b) from test_translate" + order_qt_nullable_no_null "select translate(a, nullable(a), nullable(a)) from test_translate" + + /// consts. 
most by BE-UT + order_qt_const_nullable "select translate(NULL, NULL, NULL) from test_translate" + order_qt_partial_const_nullable "select translate(NULL, b, b) from test_translate" + order_qt_const_not_nullable "select translate('a', 'b', 'c') from test_translate" + order_qt_const_other_nullable "select translate('x', b, b) from test_translate" + order_qt_const_other_not_nullable "select translate('x', 'x', a) from test_translate" + order_qt_const_nullable_no_null "select translate(nullable('abc'), nullable('中文'), nullable('xxx'))" + order_qt_const_partial_nullable_no_null "select translate('xyz', nullable('a'), nullable('a'))" + order_qt_const1 "select translate('xyz', a, b) from test_translate" + order_qt_const12 "select translate('xyz', 'abc', b) from test_translate" + order_qt_const23 "select translate(a, 'xyz', 'abc') from test_translate" + order_qt_const3 "select translate(b, a, 'abc') from test_translate" + + /// folding + def re_fe + def re_be + def re_no_fold + def check_three_ways = { test_sql -> + sql "set enable_fold_constant_by_be=false;" + re_fe = order_sql "select ${test_sql}" + sql "set enable_fold_constant_by_be=true;" + re_be = order_sql "select ${test_sql}" + sql "set debug_skip_fold_constant=true;" + re_no_fold = order_sql "select ${test_sql}" + logger.info("check on sql \${test_sql}") + assertEquals(re_fe, re_be) + assertEquals(re_fe, re_no_fold) + } + + check_three_ways "translate('abcd', '', '');" + check_three_ways "translate('abcda', 'a', 'z');" + check_three_ways "translate('abcd', 'ac', 'z');" + check_three_ways "translate('abcd', 'aac', 'zq');" + check_three_ways "translate('abcd', 'aac', 'zqx');" + check_three_ways "translate('abcd', 'aac', '中文x');" + check_three_ways "translate('中文', '中', '文');" + check_three_ways "translate('中文', '中', 'a');" + check_three_ways "translate('\tt\tt\tt', '\t', 't');" + + order_qt_1 "select translate('abcd', '', '');" + order_qt_2 "select translate('abcd', 'a', 'z')" + order_qt_3 "select translate('abcda', 'a', 'z');" + order_qt_4 "select translate('abcd', 'aac', 'zq');" + order_qt_5 "select translate('abcd', 'aac', 'zqx');" + order_qt_6 "select translate('abcd', 'aac', '中文x');" + order_qt_7 "select translate('中文', '中', '文');" + order_qt_8 "select translate('中文', '中', 'ab');" + order_qt_9 "select translate('\tt\tt\tt', '\t', 't');" +} diff --git a/regression-test/suites/query_p0/sql_functions/string_functions/test_url_decode.groovy b/regression-test/suites/query_p0/sql_functions/string_functions/test_url_decode.groovy new file mode 100644 index 00000000000000..dd5cb9d35213fd --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/string_functions/test_url_decode.groovy @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_url_decode") { + sql " drop table if exists test_url_decode" + sql """ + create table test_url_decode ( + k0 int, + a string not null, + b string null + ) + DISTRIBUTED BY HASH(k0) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + order_qt_empty_nullable "select url_decode(b) from test_url_decode" + order_qt_empty_not_nullable "select url_decode(a) from test_url_decode" + + sql """ insert into test_url_decode values (1, 'ABCDEFGHIJKLMNOPQRSTUWXYZ', 'ABCDEFGHIJKLMNOPQRSTUWXYZ'), (2, '1234567890', '1234567890'), + (3, '~%21%40%23%25%5E%26%2A%28%29%3C%3E%3F%2C.%2F%3A%7B%7D%7C%5B%5D%5C_%2B-%3D', '~%21%40%23%25%5E%26%2A%28%29%3C%3E%3F%2C.%2F%3A%7B%7D%7C%5B%5D%5C_%2B-%3D'), + (4, '', ''), (5, '%2Fhome%2Fdoris%2Fdirectory%2F', '%2Fhome%2Fdoris%2Fdirectory%2F'), (6, '', null); + """ + + order_qt_nullable "select url_decode(b) from test_url_decode" + order_qt_not_nullable "select url_decode(a) from test_url_decode" + order_qt_nullable_no_null "select url_decode(nullable(a)) from test_url_decode" + order_qt_const_nullable "select url_decode('') from test_url_decode" // choose one case to test const multi-rows + order_qt_const_not_nullable "select url_decode('%2Fhome%2Fdoris%2Fdirectory%2F')" + order_qt_const_nullable_no_null "select url_decode('%2Fhome%2Fdoris%2Fdirectory%2F')" +} diff --git a/regression-test/suites/query_p0/sql_functions/string_functions/test_url_encode.groovy b/regression-test/suites/query_p0/sql_functions/string_functions/test_url_encode.groovy new file mode 100644 index 00000000000000..18b8a615d5a53d --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/string_functions/test_url_encode.groovy @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_url_encode") { + sql " drop table if exists test_url_encode" + sql """ + create table test_url_encode ( + k0 int, + a string not null, + b string null + ) + DISTRIBUTED BY HASH(k0) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + order_qt_empty_nullable "select url_encode(b) from test_url_encode" + order_qt_empty_not_nullable "select url_encode(a) from test_url_encode" + + sql """ insert into test_url_encode values (1, 'ABCDEFGHIJKLMNOPQRSTUWXYZ', 'ABCDEFGHIJKLMNOPQRSTUWXYZ'), + (2, '1234567890', '1234567890'), (3, '~!@#%^&*()<>?,./:{}|[]\\_+-=', '~!@#%^&*()<>?,./:{}|[]\\_+-='), + (4, '', ''), (5, '/home/doris/directory/', '/home/doris/directory/'), (6, '', null); + """ + + order_qt_nullable "select url_encode(b) from test_url_encode" + order_qt_not_nullable "select url_encode(a) from test_url_encode" + order_qt_nullable_no_null "select url_encode(nullable(a)) from test_url_encode" + order_qt_const_nullable "select url_encode('') from test_url_encode" // choose one case to test const multi-rows + order_qt_const_not_nullable "select url_encode('/home/doris/directory/')" + order_qt_const_nullable_no_null "select url_encode('/home/doris/directory/')" +} diff --git a/regression-test/suites/query_p0/sql_functions/test_template_one_arg.groovy b/regression-test/suites/query_p0/sql_functions/test_template_one_arg.groovy new file mode 100644 index 00000000000000..078d100c70c647 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/test_template_one_arg.groovy @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_template_one_arg") { + sql " drop table if exists test_asin" + sql """ + create table test_asin ( + k0 int, + a double not null, + b double null + ) + DISTRIBUTED BY HASH(k0) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + order_qt_empty_nullable "select asin(b) from test_asin" + order_qt_empty_not_nullable "select asin(a) from test_asin" + + sql "insert into test_asin values (1, 1, null), (1, 1, null), (1, 1, null)" + order_qt_all_null "select asin(b) from test_asin" + + sql "truncate table test_asin" + sql """ insert into test_asin values (1, 1e-100, 1e-100), (2, -1e100, -1e100), (3, 1e100, 1e100), (4, 1, 1), (5, -1, -1), + (6, 0, 0), (7, -0, -0), (8, 123, 123), + (9, 0.1, 0.1), (10, -0.1, -0.1), (11, 1e-15, 1e-15), (12, 0, null); + """ + + order_qt_nullable "select asin(b) from test_asin" + order_qt_not_nullable "select asin(a) from test_asin" + order_qt_nullable_no_null "select asin(nullable(a)) from test_asin" + order_qt_const_nullable "select asin(NULL) from test_asin" // choose some cases to test const multi-rows + order_qt_const_not_nullable "select asin(0.5) from test_asin" + order_qt_const_nullable_no_null "select asin(nullable(0.5))" +} \ No newline at end of file diff --git a/regression-test/suites/query_p0/sql_functions/test_template_three_args.groovy b/regression-test/suites/query_p0/sql_functions/test_template_three_args.groovy new file mode 100644 index 00000000000000..13de80a0393024 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/test_template_three_args.groovy @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_template_three_args") { + // this table has nothing todo. 
just make it eaiser to generate query + sql " drop table if exists hits_three_args " + sql """ create table hits_three_args( + nothing boolean + ) + properties("replication_num" = "1"); + """ + sql "insert into hits_three_args values(true);" + + sql " drop table if exists arg1_three_args" + sql """ + create table arg1_three_args ( + k0 int, + a varchar not null, + b varchar null, + ) + DISTRIBUTED BY HASH(k0) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + order_qt_empty_nullable "select concat(a, a, a) from arg1_three_args" + order_qt_empty_not_nullable "select concat(b, b, b) from arg1_three_args" + order_qt_empty_partial_nullable "select concat(a, b, b) from arg1_three_args" + + sql "insert into arg1_three_args values (1, 1, null), (1, 1, null), (1, 1, null)" + order_qt_all_null "select concat(b, b, b ,b) from arg1_three_args" + + sql "truncate table arg1_three_args" + sql """ insert into arg1_three_args values (1, "", ""), (2, "中文", "中文"), (3, "123123", "123123"), + (4, "\\\\a\\\\b\\\\c\\\\d", "\\\\a\\\\b\\\\c\\\\d"), + (5, "!@#@#\$#^\$%%\$^", "!@#@#\$#^\$%%\$^"), (6, " ", " "), + (7, "", NULL); + """ + + order_qt_nullable """ + SELECT concat(t.arg1_three_args, t.ARG2, t.ARG3) as result + FROM ( + SELECT hits_three_args.nothing, TABLE1.arg1_three_args, TABLE1.order1, TABLE2.ARG2, TABLE2.order2, TABLE3.ARG3, TABLE3.order3 + FROM hits_three_args + CROSS JOIN ( + SELECT b as arg1_three_args, k0 as order1 + FROM arg1_three_args + ) as TABLE1 + CROSS JOIN ( + SELECT b as ARG2, k0 as order2 + FROM arg1_three_args + ) as TABLE2 + CROSS JOIN ( + SELECT b as ARG3, k0 as order3 + FROM arg1_three_args + ) as TABLE3 + )t; + """ + + /// nullables + order_qt_not_nullable "select concat(a, a, a) from arg1_three_args" + order_qt_partial_nullable "select concat(a, b, b) from arg1_three_args" + order_qt_nullable_no_null "select concat(a, nullable(a), nullable(a)) from arg1_three_args" + /// if you set `use_default_implementation_for_nulls` to false, add: + // order_qt_nullable1 " SELECT b as arg1_three_args...)as TABLE1 ... SELECT a as arg1_three_args...)as TABLE1 ... + // order_qt_nullable2 " SELECT a as arg1_three_args...)as TABLE1 ... SELECT b as arg1_three_args...)as TABLE1 ... + + /// consts. 
most by BE-UT + order_qt_const_nullable "select concat(NULL, NULL, NULL) from arg1_three_args" + order_qt_partial_const_nullable "select concat(NULL, b, b) from arg1_three_args" + order_qt_const_not_nullable "select concat('a', 'b', 'c') from arg1_three_args" + order_qt_const_other_nullable "select concat('x', b, b) from arg1_three_args" + order_qt_const_other_not_nullable "select concat('x', 'x', a) from arg1_three_args" + order_qt_const_nullable_no_null "select concat(nullable('abc'), nullable('中文'), nullable('xxx'))" + order_qt_const_partial_nullable_no_null "select concat('xyz', nullable('a'), nullable('a'))" + order_qt_const1 "select concat('xyz', a, b) from arg1_three_args" + order_qt_const12 "select concat('xyz', 'abc', b) from arg1_three_args" + order_qt_const23 "select concat(a, 'xyz', 'abc') from arg1_three_args" + order_qt_const3 "select concat(b, a, 'abc') from arg1_three_args" + + /// folding + def re_fe + def re_be + def re_no_fold + def check_three_ways = { test_sql -> + re_fe = order_sql "select/*+SET_VAR(enable_fold_constant_by_be=false)*/ ${test_sql}" + re_be = order_sql "select/*+SET_VAR(enable_fold_constant_by_be=true)*/ ${test_sql}" + re_no_fold = order_sql "select/*+SET_VAR(debug_skip_fold_constant=true)*/ ${test_sql}" + logger.info("check on sql \${test_sql}") + assertEquals(re_fe, re_be) + assertEquals(re_fe, re_no_fold) + } + + check_three_ways "concat('', '', '')" + check_three_ways "concat('\\t\\t', '\\t\\t', '\\t\\t')" + check_three_ways "concat('中文', '中文', '中文')" + check_three_ways "concat('abcde', 'abcde', 'abcde')" +} \ No newline at end of file diff --git a/regression-test/suites/query_p0/sql_functions/test_template_two_args.groovy b/regression-test/suites/query_p0/sql_functions/test_template_two_args.groovy new file mode 100644 index 00000000000000..9d7e2643eca1c4 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/test_template_two_args.groovy @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_template_two_args") { + // this table has nothing todo. 
just make it eaiser to generate query + sql " drop table if exists hits_two_args " + sql """ create table hits_two_args( + nothing boolean + ) + properties("replication_num" = "1"); + """ + sql "insert into hits_two_args values(true);" + + sql " drop table if exists arg1_two_args" + sql """ + create table arg1_two_args ( + k0 int, + a double not null, + b double null, + ) + DISTRIBUTED BY HASH(k0) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + order_qt_empty_nullable "select atan2(a, a) from arg1_two_args" + order_qt_empty_not_nullable "select atan2(b, b) from arg1_two_args" + order_qt_empty_partial_nullable "select atan2(a, b) from arg1_two_args" + + sql "insert into arg1_two_args values (1, 1, null), (1, 1, null), (1, 1, null)" + order_qt_all_null "select atan2(b, b) from arg1_two_args" + + sql "truncate table arg1_two_args" + sql """ insert into arg1_two_args values (1, 1e-100, 1e-100), (2, -1e100, -1e100), (3, 1e100, 1e100), (4, 1, 1), (5, -1, -1), + (6, 0, 0), (7, -0, -0), (8, 123, 123), + (9, 0.1, 0.1), (10, -0.1, -0.1), (11, 1e-15, 1e-15), (12, 0, null); + """ + + /// all values + order_qt_nullable """ + SELECT atan2(t.arg1_two_args, t.ARG2) as result + FROM ( + SELECT hits_two_args.nothing, TABLE1.arg1_two_args, TABLE1.order1, TABLE2.ARG2, TABLE2.order2 + FROM hits_two_args + CROSS JOIN ( + SELECT b as arg1_two_args, k0 as order1 + FROM arg1_two_args + ) as TABLE1 + CROSS JOIN ( + SELECT b as ARG2, k0 as order2 + FROM arg1_two_args + ) as TABLE2 + )t; + """ + + /// nullables + order_qt_not_nullable "select atan2(a, a) from arg1_two_args" + order_qt_partial_nullable "select atan2(a, b) from arg1_two_args" + order_qt_nullable_no_null "select atan2(a, nullable(a)) from arg1_two_args" + /// if you set `use_default_implementation_for_nulls` to false, add: + // order_qt_nullable1 " SELECT b as arg1_two_args...)as TABLE1 ... SELECT a as arg1_two_args...)as TABLE1 + // order_qt_nullable2 " SELECT a as arg1_two_args...)as TABLE1 ... SELECT b as arg1_two_args...)as TABLE1 + + /// consts. 
most by BE-UT + order_qt_const_nullable "select atan2(NULL, NULL) from arg1_two_args" + order_qt_partial_const_nullable "select atan2(NULL, b) from arg1_two_args" + order_qt_const_not_nullable "select atan2(0.5, 100) from arg1_two_args" + order_qt_const_other_nullable "select atan2(10, b) from arg1_two_args" + order_qt_const_other_not_nullable "select atan2(a, 10) from arg1_two_args" + order_qt_const_nullable_no_null "select atan2(nullable(1e100), nullable(1e-10))" + order_qt_const_nullable_no_null_multirows "select atan2(nullable(1e100), nullable(1e-10))" + order_qt_const_partial_nullable_no_null "select atan2(1e100, nullable(1e-10))" + + /// folding + def re_fe + def re_be + def re_no_fold + def check_three_ways = { test_sql -> + re_fe = order_sql "select/*+SET_VAR(enable_fold_constant_by_be=false)*/ ${test_sql}" + re_be = order_sql "select/*+SET_VAR(enable_fold_constant_by_be=true)*/ ${test_sql}" + re_no_fold = order_sql "select/*+SET_VAR(debug_skip_fold_constant=true)*/ ${test_sql}" + logger.info("check on sql ${test_sql}") + assertEquals(re_fe, re_be) + assertEquals(re_fe, re_no_fold) + } + + check_three_ways "atan2(-1, -2)" + check_three_ways "atan2(-1e100, 3.14)" + check_three_ways "atan2(0, 0)" + check_three_ways "atan2(1e100, 1e100)" + check_three_ways "atan2(-0.5, 0.5)" +} \ No newline at end of file diff --git a/regression-test/suites/query_p0/sql_functions/window_functions/test_partition_topn.groovy b/regression-test/suites/query_p0/sql_functions/window_functions/test_partition_topn.groovy new file mode 100644 index 00000000000000..75bf4b35636d56 --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/window_functions/test_partition_topn.groovy @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_partition_topn") { + sql """ DROP TABLE IF EXISTS test_partition_topn """ + sql """ + CREATE TABLE IF NOT EXISTS test_partition_topn ( + u_id int NULL COMMENT "", + u_city varchar(20) NULL COMMENT "", + u_salary int NULL COMMENT "" + ) ENGINE=OLAP + DUPLICATE KEY(`u_id`, `u_city`, `u_salary`) + DISTRIBUTED BY HASH(`u_id`, `u_city`, `u_salary`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "in_memory" = "false", + "storage_format" = "V2" + ); + """ + + sql """ + INSERT INTO test_partition_topn(u_id, u_city, u_salary) VALUES + ('1', 'gz', 30000), + ('2', 'gz', 25000), + ('3', 'gz', 17000), + ('4', 'gz', 32000), + ('5', 'gz', 30000), + ('6', 'gz', 25000), + ('7', 'gz', 17000), + ('8', 'gz', 32000), + ('9', 'gz', 32000), + ('10', 'gz', 32000), + ('11', 'gz', 32000), + ('12', 'sz', 30000), + ('13', 'sz', 25000), + ('14', 'sz', 17000), + ('15', 'sz', 32000), + ('16', 'sz', 30000), + ('16', 'sz', 25000), + ('17', 'sz', 17000), + ('18', 'sz', 32000), + ('19', 'sz', 32000), + ('20', 'sz', 32000), + ('21', 'sz', 32000); + """ + + sql """ set parallel_pipeline_task_num = 1; """ + + qt_sql_topn """ select * from (select u_id, row_number() over(partition by u_city order by u_id) as rn from test_partition_topn)t where rn = 1 order by 1; """ +} + + diff --git a/regression-test/suites/query_p0/sql_functions/window_functions/test_select_stddev_variance_window.groovy b/regression-test/suites/query_p0/sql_functions/window_functions/test_select_stddev_variance_window.groovy index 7ec02d90ae130c..0933866b0a42c8 100644 --- a/regression-test/suites/query_p0/sql_functions/window_functions/test_select_stddev_variance_window.groovy +++ b/regression-test/suites/query_p0/sql_functions/window_functions/test_select_stddev_variance_window.groovy @@ -148,18 +148,6 @@ suite("test_select_stddev_variance_window") { qt_select_default "select k1, percentile_approx(k2,0.5,4096) over (partition by k6 order by k1 rows between current row and unbounded following) from ${tableName} order by k1;" qt_select_default "select k1, percentile_approx(k2,0.5,4096) over (partition by k6 order by k1) from ${tableName} order by k1;" - sql "set experimental_enable_nereids_planner = false;" - - qt_sql_row_number_1 """ - select * from (select row_number() over(partition by k2 order by k6) as rk,k2,k6 from ${tableName}) as t where rk = 1 order by 1,2,3; - """ - qt_sql_rank_1 """ - select * from (select rank() over(partition by k2 order by k6) as rk,k2,k6 from ${tableName}) as t where rk = 1 order by 1,2,3; - """ - qt_sql_dense_rank_1 """ - select * from (select dense_rank() over(partition by k2 order by k6) as rk,k2,k6 from ${tableName}) as t where rk = 1 order by 1,2,3; - """ - sql "set experimental_enable_nereids_planner = true;" qt_sql_row_number """ diff --git a/regression-test/suites/query_p0/system/test_partitions_schema.groovy b/regression-test/suites/query_p0/system/test_partitions_schema.groovy index 0215bc8756776a..0cf83d67e66995 100644 --- a/regression-test/suites/query_p0/system/test_partitions_schema.groovy +++ b/regression-test/suites/query_p0/system/test_partitions_schema.groovy @@ -20,7 +20,12 @@ import static java.util.concurrent.TimeUnit.SECONDS suite("test_partitions_schema") { def dbName = "test_partitions_schema_db" - def listOfColum = 
"TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,SUBPARTITION_NAME,PARTITION_ORDINAL_POSITION,SUBPARTITION_ORDINAL_POSITION,PARTITION_METHOD,SUBPARTITION_METHOD,PARTITION_EXPRESSION,SUBPARTITION_EXPRESSION,PARTITION_DESCRIPTION,TABLE_ROWS,AVG_ROW_LENGTH,DATA_LENGTH,MAX_DATA_LENGTH,INDEX_LENGTH,DATA_FREE,CHECKSUM,PARTITION_COMMENT,NODEGROUP,TABLESPACE_NAME"; + // row count skipped in common validation + // if table report not done for the all tablet + // row count will be -1 if done can get actual data. + // so we added one check waiting for rowcount and all other case we skipped row count + // checkpoint. + def listOfColum = "TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,SUBPARTITION_NAME,PARTITION_ORDINAL_POSITION,SUBPARTITION_ORDINAL_POSITION,PARTITION_METHOD,SUBPARTITION_METHOD,PARTITION_EXPRESSION,SUBPARTITION_EXPRESSION,PARTITION_DESCRIPTION"; sql "drop database if exists ${dbName}" sql "CREATE DATABASE IF NOT EXISTS ${dbName}" sql "use ${dbName}" @@ -156,11 +161,12 @@ suite("test_partitions_schema") { "row_store_page_size" = "8190" ); """ - qt_select_check_1 """select $listOfColum from information_schema.partitions where table_schema=\"${dbName}\" order by $listOfColum""" + + order_qt_select_check_1 """select $listOfColum from information_schema.partitions where table_schema=\"${dbName}\"""" sql """ drop table test_row_column_page_size2; """ - qt_select_check_2 """select $listOfColum from information_schema.partitions where table_schema=\"${dbName}\" order by $listOfColum""" + order_qt_select_check_2 """select $listOfColum from information_schema.partitions where table_schema=\"${dbName}\"""" def user = "partitions_user" sql "DROP USER IF EXISTS ${user}" @@ -179,7 +185,7 @@ suite("test_partitions_schema") { def url=tokens[0] + "//" + tokens[2] + "/" + "information_schema" + "?" 
connect(user=user, password='123abc!@#', url=url) { - qt_select_check_3 """select $listOfColum from information_schema.partitions where table_schema=\"${dbName}\" order by $listOfColum""" + order_qt_select_check_3 """select $listOfColum from information_schema.partitions where table_schema=\"${dbName}\"""" } sql "GRANT SELECT_PRIV ON ${dbName}.duplicate_table TO ${user}" diff --git a/regression-test/suites/query_p0/system/test_query_sys.groovy b/regression-test/suites/query_p0/system/test_query_sys.groovy index dd7998b010f9c8..7b6ca1027b479c 100644 --- a/regression-test/suites/query_p0/system/test_query_sys.groovy +++ b/regression-test/suites/query_p0/system/test_query_sys.groovy @@ -43,6 +43,8 @@ suite("test_query_sys", "query,p0") { sql "select pi();" sql "select e();" sql "select sleep(2);" + sql "select last_query_id();" + sql "select LAST_QUERY_ID();" // INFORMATION_SCHEMA sql "SELECT table_name FROM INFORMATION_SCHEMA.TABLES where table_schema=\"test_query_db\" and TABLE_TYPE = \"BASE TABLE\" order by table_name" diff --git a/regression-test/suites/query_p0/system/test_table_properties.groovy b/regression-test/suites/query_p0/system/test_table_properties.groovy index 7dd55d4fb78a7f..3314975d689c0f 100644 --- a/regression-test/suites/query_p0/system/test_table_properties.groovy +++ b/regression-test/suites/query_p0/system/test_table_properties.groovy @@ -84,11 +84,11 @@ suite("test_table_properties") { """ qt_select_check_1 """select count(*) from information_schema.table_properties where table_schema=\"${dbName}\"; """ - qt_select_check_2 """select * from information_schema.table_properties where table_schema=\"${dbName}\" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE; """ + qt_select_check_2 """select * from information_schema.table_properties where table_schema=\"${dbName}\" and PROPERTY_NAME != "default.replication_allocation" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE""" sql """ drop table listtable; """ - qt_select_check_3 """select * from information_schema.table_properties where table_schema=\"${dbName}\" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE; """ + qt_select_check_3 """select * from information_schema.table_properties where table_schema=\"${dbName}\" and PROPERTY_NAME != "default.replication_allocation" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE""" def user = "table_properties_user" sql "DROP USER IF EXISTS ${user}" @@ -106,17 +106,17 @@ suite("test_table_properties") { def url=tokens[0] + "//" + tokens[2] + "/" + "information_schema" + "?" 
connect(user=user, password='123abc!@#', url=url) { - qt_select_check_4 """select * from information_schema.table_properties ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE; """ + qt_select_check_4 """select * from information_schema.table_properties where PROPERTY_NAME != "default.replication_allocation" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE""" } sql "GRANT SELECT_PRIV ON ${dbName}.duplicate_table TO ${user}" connect(user=user, password='123abc!@#', url=url) { - qt_select_check_5 """select * from information_schema.table_properties ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE; """ + qt_select_check_5 """select * from information_schema.table_properties where PROPERTY_NAME != "default.replication_allocation" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE""" } sql "REVOKE SELECT_PRIV ON ${dbName}.duplicate_table FROM ${user}" connect(user=user, password='123abc!@#', url=url) { - qt_select_check_6 """select * from information_schema.table_properties ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE; """ + qt_select_check_6 """select * from information_schema.table_properties where PROPERTY_NAME != "default.replication_allocation" ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PROPERTY_NAME,PROPERTY_VALUE""" } diff --git a/regression-test/suites/query_p0/test_forward_qeury.groovy b/regression-test/suites/query_p0/test_forward_qeury.groovy index 28295e4ec895bb..798e8865ca5074 100644 --- a/regression-test/suites/query_p0/test_forward_qeury.groovy +++ b/regression-test/suites/query_p0/test_forward_qeury.groovy @@ -43,10 +43,6 @@ suite("test_forward_query", 'docker') { cluster.injectDebugPoints(NodeType.FE, ['StmtExecutor.forward_all_queries' : [forwardAllQueries:true]]) - try { - sql """ SELECT * FROM ${tbl} """ - } catch (Exception ignored) { - assertTrue(false) - } + sql """ SELECT * FROM ${tbl} """ } } diff --git a/regression-test/suites/query_p0/test_row_policy.groovy b/regression-test/suites/query_p0/test_row_policy.groovy index 620f49dac4e7d3..01766e9a084057 100644 --- a/regression-test/suites/query_p0/test_row_policy.groovy +++ b/regression-test/suites/query_p0/test_row_policy.groovy @@ -35,11 +35,6 @@ suite("test_row_policy") { assertTrue(!clusters.isEmpty()) def validCluster = clusters[0][0] sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; - } - - connect(user=user, password='123456', url=url) { - sql "set enable_nereids_planner = false" - sql "SELECT * FROM ${tableName} a JOIN ${tableName} b ON a.id = b.id" } connect(user=user, password='123456', url=url) { diff --git a/regression-test/suites/query_profile/adaptive_pipeline_task_serial_read_on_limit.groovy b/regression-test/suites/query_profile/adaptive_pipeline_task_serial_read_on_limit.groovy index 46ff11b7845c91..87279f65157409 100644 --- a/regression-test/suites/query_profile/adaptive_pipeline_task_serial_read_on_limit.groovy +++ b/regression-test/suites/query_profile/adaptive_pipeline_task_serial_read_on_limit.groovy @@ -50,7 +50,7 @@ suite('adaptive_pipeline_task_serial_read_on_limit') { `id` INT, `name` varchar(32) ) ENGINE=OLAP - DISTRIBUTED BY HASH(`id`) BUCKETS 10 + DISTRIBUTED BY HASH(`id`) BUCKETS 5 PROPERTIES ( "replication_allocation" = "tag.location.default: 1" ); @@ -108,7 +108,7 @@ suite('adaptive_pipeline_task_serial_read_on_limit') { set enable_adaptive_pipeline_task_serial_read_on_limit=true; """ sql """ - set 
adaptive_pipeline_task_serial_read_on_limit=10; + set adaptive_pipeline_task_serial_read_on_limit=20; """ sql """ select "modify_to_20_${uuidString}", * from adaptive_pipeline_task_serial_read_on_limit limit 15; diff --git a/regression-test/suites/rollup_p0/test_rollup_add_column.groovy b/regression-test/suites/rollup_p0/test_rollup_add_column.groovy index 944dd1d128f088..cc31644db839be 100644 --- a/regression-test/suites/rollup_p0/test_rollup_add_column.groovy +++ b/regression-test/suites/rollup_p0/test_rollup_add_column.groovy @@ -14,6 +14,9 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +import org.awaitility.Awaitility +import static java.util.concurrent.TimeUnit.SECONDS + suite("test_rollup_add_column") { def tbName = "test_rollup_add_column" def rollupName = "test_rollup_add_column_index" @@ -41,38 +44,30 @@ suite("test_rollup_add_column") { """ sql """ALTER TABLE ${tbName} ADD ROLLUP ${rollupName}(k1, v1);""" - int max_try_secs = 60 - while (max_try_secs--) { + int max_try_secs = 120 + // if the wait times out, the test case fails, same behaviour as the old polling loop. + Awaitility.await().atMost(max_try_secs, SECONDS).pollInterval(2, SECONDS).until{ String res = getJobRollupState(tbName) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) sleep(3000) - break - } else { - Thread.sleep(2000) - if (max_try_secs < 1) { - println "test timeout," + "state:" + res - assertEquals("FINISHED",res) - } - } + return true; + } + return false; } + Thread.sleep(2000) sql "ALTER TABLE ${tbName} ADD COLUMN k3 INT KEY NOT NULL DEFAULT '3' AFTER k1 TO ${rollupName};" - max_try_secs = 60 - while (max_try_secs--) { + + Awaitility.await().atMost(max_try_secs, SECONDS).pollInterval(2, SECONDS).until{ String res = getJobColumnState(tbName) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) sleep(3000) - break - } else { - Thread.sleep(2000) - if (max_try_secs < 1) { - println "test timeout," + "state:" + res - assertEquals("FINISHED",res) - } + return true; } + return false; } sql "insert into ${tbName} values(1, 2, 3, 4, 5);" diff --git a/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy b/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy index e17ae1075b5fec..cacb0f33980e3c 100644 --- a/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy +++ b/regression-test/suites/rollup_p0/test_rollup_agg_date.groovy @@ -14,6 +14,9 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +import org.awaitility.Awaitility +import static java.util.concurrent.TimeUnit.SECONDS + suite("test_rollup_agg_date", "rollup") { def tbName = "test_rollup_agg_date" @@ -41,38 +44,31 @@ suite("test_rollup_agg_date", "rollup") { DISTRIBUTED BY HASH(datek1) BUCKETS 5 properties("replication_num" = "1"); """ sql """ALTER TABLE ${tbName} ADD ROLLUP rollup_date(datek1,datetimek2,datetimek1,datetimek3,datev1,datetimev1,datetimev2,datetimev3);""" - int max_try_secs = 60 - while (max_try_secs--) { + int max_try_secs = 120 + // if the wait times out, the test case fails, same behaviour as the old polling loop.
+ Awaitility.await().atMost(max_try_secs, SECONDS).pollInterval(2, SECONDS).until{ String res = getJobRollupState(tbName) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) sleep(3000) - break - } else { - Thread.sleep(2000) - if (max_try_secs < 1) { - println "test timeout," + "state:" + res - assertEquals("FINISHED",res) - } + return true; } + return false; } + Thread.sleep(2000) sql "ALTER TABLE ${tbName} ADD COLUMN datetimev4 datetimev2(3) MAX NULL;" - max_try_secs = 60 - while (max_try_secs--) { + // if the wait times out, the test case fails, same behaviour as the old polling loop. + Awaitility.await().atMost(max_try_secs, SECONDS).pollInterval(2, SECONDS).until{ String res = getJobColumnState(tbName) if (res == "FINISHED" || res == "CANCELLED") { assertEquals("FINISHED", res) sleep(3000) - break - } else { - Thread.sleep(2000) - if (max_try_secs < 1) { - println "test timeout," + "state:" + res - assertEquals("FINISHED",res) - } + return true; } + return false; } + sql "SHOW ALTER TABLE ROLLUP WHERE TableName='${tbName}';" qt_sql "DESC ${tbName} ALL;" sql "insert into ${tbName} values('2022-08-22', '2022-08-22 11:11:11.111111', '2022-08-22 11:11:11.111111', '2022-08-22 11:11:11.111111', '2022-08-22', '2022-08-22 11:11:11.111111', '2022-08-22 11:11:11.111111', '2022-08-22 11:11:11.111111', '2022-08-22 11:11:11.111111');" diff --git a/regression-test/suites/schema_change/test_schema_change_concurrent_with_txn.groovy b/regression-test/suites/schema_change/test_schema_change_concurrent_with_txn.groovy index 3a63e306ae15de..a31a7786e44423 100644 --- a/regression-test/suites/schema_change/test_schema_change_concurrent_with_txn.groovy +++ b/regression-test/suites/schema_change/test_schema_change_concurrent_with_txn.groovy @@ -22,6 +22,7 @@ suite('test_schema_change_concurrent_with_txn', 'docker') { def options = new ClusterOptions() options.enableDebugPoints() options.feConfigs.add('publish_wait_time_second=-1') + options.feConfigs.add('enable_abort_txn_by_checking_conflict_txn=false') docker(options) { sql 'SET GLOBAL insert_visible_timeout_ms = 2000' diff --git a/regression-test/suites/schema_change_p0/test_agg_schema_value_modify1.groovy b/regression-test/suites/schema_change_p0/test_agg_schema_value_modify1.groovy new file mode 100644 index 00000000000000..ddb63d8a3e7d7c --- /dev/null +++ b/regression-test/suites/schema_change_p0/test_agg_schema_value_modify1.groovy @@ -0,0 +1,2401 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +suite("test_agg_schema_value_modify1", "p0") { + def tbName1 = "agg_model_value_change1" + def tbName2 = "agg_model_value_change_1" + def on_write = getRandomBoolean() + println String.format("current enable_agg_key_merge_on_write is : %s ", on_write) + //Test the agg model by adding a value column + sql """ DROP TABLE IF EXISTS ${tbName1} """ + def getTableStatusSql = " SHOW ALTER TABLE COLUMN WHERE IndexName='${tbName1}' ORDER BY createtime DESC LIMIT 1 " + def errorMessage = "" + /** + * Test the agg model by modify a value type + */ + def initTable2 = "" + def initTableData2 = "" + sql """ DROP TABLE IF EXISTS ${tbName1} """ + def initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `is_teacher` BOOLEAN REPLACE_IF_NOT_NULL COMMENT \"是否是老师\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + def initTableData = "insert into ${tbName1} values(123456789, 'Alice', 0, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00')," + + " (234567890, 'Bob', 0, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00')," + + " (345678901, 'Carol', 1, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00')," + + " (456789012, 'Dave', 0, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00')," + + " (567890123, 'Eve', 0, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00')," + + " (678901234, 'Frank', 1, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00')," + + " (789012345, 'Grace', 0, 'Xian', 29, 0, 13333333333, 'No. 
222 Street, Xian', '2022-07-07 22:00:00');" + + def insertSql = "" + + /** + * Test the agg model by modify a value type from FLOAT to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from FLOAT to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', false, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from FLOAT to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 2, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from FLOAT to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from FLOAT to INT + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column score INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from FLOAT to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from FLOAT to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Loss of accuracy Test the agg model by modify a value type from FLOAT to DOUBLE + + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DOUBLE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DOUBLE REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from FLOAT to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to DECIMAL128" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from FLOAT to DATE + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to DATEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from FLOAT to DATE + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to DATEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from FLOAT to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to DATETIMEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31 20:12:12', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from FLOAT to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to DATETIMEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31 20:12:12', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from FLOAT to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asdc', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from FLOAT to VARCHAR + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asdv', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` VARCHAR(100) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 'asdv', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}", "${tbName2}", "score") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //Test the agg model by modify a value type from FLOAT to STRING + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` STRING REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}", "${tbName2}", "score") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from FLOAT to map + //Test the agg model by modify a value type from FLOAT to STRING + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', {'a': 100, 'b': 200}, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from FLOAT to JSON + errorMessage = "errCode = 2, detailMessage = Can not change FLOAT to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '{'a': 100, 'b': 200}', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + /** + * Test the agg model by modify a value type from DOUBLE to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DOUBLE REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from DOUBLE to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', false, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from DOUBLE to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DOUBLE to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DOUBLE to INT + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column score INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DOUBLE to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DOUBLE to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DOUBLE to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to FLOAT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DOUBLE to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to DECIMAL128" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DOUBLE to DATE + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to DATEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from DOUBLE to DATE + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to DATEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DOUBLE to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to DATETIMEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31 20:12:12', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from DOUBLE to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to DATETIMEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31 20:12:12', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from DOUBLE to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DOUBLE to VARCHAR + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` VARCHAR(100) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 
987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}", "${tbName2}", "score") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //Test the agg model by modify a value type from DOUBLE to STRING + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` STRING REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}", "${tbName2}", "score") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from DOUBLE to map + //Test the agg model by modify a value type from DOUBLE to STRING + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', {'a': 100, 'b': 200}, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DOUBLE to JSON + errorMessage = "errCode = 2, detailMessage = Can not change DOUBLE to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '{'a': 100, 'b': 200}', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + /** + * Test the agg model by modify a value type from DECIMAL to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DECIMAL(38,10) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 
654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from DECIMAL128 to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', false, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from DECIMAL128 to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DECIMAL128 to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DECIMAL128 to INT + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column score INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DECIMAL128 to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DECIMAL128 to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DECIMAL128 to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to FLOAT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Data accuracy loss Data rounding Test the agg model by modify a value type from DECIMAL128 to DECIMAL + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + + + //TODO Test the agg model by modify a value type from DECIMAL128 to DATE + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to DATEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from DECIMAL128 to DATE + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to DATEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DECIMAL128 to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to DATETIMEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31 20:12:12', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from DECIMAL128 to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to DATETIMEV2" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '2003-12-31 20:12:12', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from DECIMAL128 to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test Data accuracy loss the agg model by modify a value type from DECIMAL128 to VARCHAR + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` VARCHAR(100) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Data accuracy loss Test the agg model by modify a value type from DECIMAL128 to STRING + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` STRING REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (993456689, 'Alice', 'asd', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]')," + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from DECIMAL128 to map + //Test the agg model by modify a value type from DECIMAL128 to STRING + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', {'a': 100, 'b': 200}, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DECIMAL128 to JSON + errorMessage = "errCode = 2, detailMessage = Can not change DECIMAL128 to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column score JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(993456689, 'Alice', '{'a': 100, 'b': 200}', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + + /** + * Test the agg model by modify a value type from DATE to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATE REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from DATE to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.4, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', false, {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from DATE to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 1', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATE to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 2, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATE to INT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column register_time INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 156, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATE to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '15662', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATE to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 15898, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATE to DOUBLE + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to DOUBLE" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DOUBLE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 3.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATE to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to DECIMAL128" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 3.6598, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATE to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to FLOAT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.6, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 1.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from DATE to DATETIME + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from DATE to DATETIME + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIMEV2 REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + + //TODO Test the agg model by modify a value type from DATE to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.69, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 'cs1', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATE to VARCHAR + //Test the agg model by modify a value type from DATE to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to VARCHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.45, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'asd', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATE to STRING + //Test the agg model by modify a value type from DATE to STRING + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to STRING" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.89, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATE to map + //Test the agg model by modify a value type from DATE to MAP + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.49, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', {'a': 100, 'b': 200}, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATE to JSON + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.34, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '{'a': 100, 'b': 200}', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + + /** + * Test the agg model by modify a value type from DATEV2 to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATEV2 REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from DATEV2 to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', false, {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from DATEV2 to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 1', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATEV2 to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 2, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATEV2 to INT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column register_time INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 156, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATEV2 to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '15662', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATEV2 to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 15898, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATEV2 to DOUBLE + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to DOUBLE" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DOUBLE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 3.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATEV2 to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to DECIMAL128" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 3.6598, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATEV2 to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to FLOAT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.6, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 1.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATEV2 to DATETIME + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 
987 Street, Shenzhen', '2022-04-04 00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + + //TODO Test the agg model by modify a value type from DATEV2 to DATETIME + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIMEV2 REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2003-12-31 20:12:12', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + + //TODO Test the agg model by modify a value type from DATEV2 to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'cs1', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATEV2 to VARCHAR + //Test the agg model by modify a value type from DATEV2 to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to VARCHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.45, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'asd', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATEV2 to STRING + //Test the agg model by modify a value type from DATEV2 to STRING + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to STRING" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.89, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATEV2 to map + //Test the agg model by modify a value type from DATEV2 to MAP + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.49, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', {'a': 100, 'b': 200}, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATEV2 to JSON + errorMessage = "errCode = 2, detailMessage = Can not change DATEV2 to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.34, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '{'a': 100, 'b': 200}', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + + + /** + * Test the agg model by modify a value type from DATETIME to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:48:26', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 10:48:26', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 10:48:26', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 10:48:26', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 10:48:26', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 10:48:26', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 10:48:26', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from DATETIME to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', false, {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from DATETIME to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 1', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIME to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 2, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATETIME to INT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column register_time INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 156, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIME to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '15662', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATETIME to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 15898, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIME to DOUBLE + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to DOUBLE" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DOUBLE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 3.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIME to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to DECIMAL128" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 3.6598, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIME to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to FLOAT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.6, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 1.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIME to DATE + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATE REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 
987 Street, Shenzhen', '2022-04-04', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from DATETIME to DATEV2 + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATEV2 REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 
222 Street, Xian', '2022-07-07', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + //TODO Test the agg model by modify a value type from DATETIME to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'cs1', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATETIME to VARCHAR + //Test the agg model by modify a value type from DATETIME to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to VARCHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.45, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'asd', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATETIME to STRING + //Test the agg model by modify a value type from DATETIME to STRING + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to STRING" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.89, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIME to map + //Test the agg model by modify a value type from DATETIME to MAP + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.49, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', {'a': 100, 'b': 200}, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIME to JSON + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.34, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '{'a': 100, 'b': 200}', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + /** + * Test the agg model by modify a value type from DATETIMEV2 to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:48:26', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 10:48:26', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 10:48:26', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 10:48:26', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 10:48:26', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 10:48:26', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 10:48:26', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from DATETIMEV2 to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to BOOLEAN" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', false, {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from DATETIMEV2 to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to TINYINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 1', {'a': 100, 'b': 200}, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIMEV2 to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to SMALLINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 2, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATETIMEV2 to INT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to INT" + expectException({ + sql initTable + sql initTableData + + sql """ alter table ${tbName1} MODIFY column register_time INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 156, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIMEV2 to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to BIGINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 545645, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '15662', {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from DATETIMEV2 to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to LARGEINT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 156546, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 15898, {'a': 100, 'b': 200}, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIMEV2 to DOUBLE + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to DOUBLE" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DOUBLE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', 3.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIMEV2 to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to DECIMAL128" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 3.6598, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIMEV2 to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to FLOAT" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.6, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 1.65, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + // Test the agg model by modify a value type from DATETIMEV2 to DATE + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATE REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 
987 Street, Shenzhen', '2022-04-04', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + + //Test the agg model by modify a value type from DATETIMEV2 to DATEV2 + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + + sql """ DROP TABLE IF EXISTS ${tbName2} """ + initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` FLOAT REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATEV2 REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + + " );" + + initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 1.8, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.2, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (923456689, 'Alice', 1.2, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2003-12-31', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.1, 'Xian', 29, 0, 13333333333, 'No. 
222 Street, Xian', '2022-07-07', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + sql initTable2 + sql initTableData2 + checkTableData("${tbName1}","${tbName2}","register_time") + sql """ DROP TABLE IF EXISTS ${tbName1} """ + + + + + //TODO Test the agg model by modify a value type from DATETIMEV2 to CHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to CHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time CHAR(15) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'cs1', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATETIMEV2 to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to VARCHAR" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.45, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', 'asd', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //TODO Test the agg model by modify a value type from DATETIMEV2 to STRING + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to STRING" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.89, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from DATETIMEV2 to MAP + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to MAP" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.49, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', {'a': 100, 'b': 200}, {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from DATETIMEV2 to JSON + errorMessage = "errCode = 2, detailMessage = Can not change DATETIMEV2 to JSON" + expectException({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column register_time JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.34, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '{'a': 100, 'b': 200}', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + +} diff --git a/regression-test/suites/schema_change_p0/test_agg_schema_value_modify4.groovy b/regression-test/suites/schema_change_p0/test_agg_schema_value_modify4.groovy new file mode 100644 index 00000000000000..b1c0c4d1453733 --- /dev/null +++ b/regression-test/suites/schema_change_p0/test_agg_schema_value_modify4.groovy @@ -0,0 +1,1089 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_agg_schema_value_modify4", "p0") { + def tbName1 = "test_agg_model_value_change3" + def tbName2 = "test_agg_model_value_change_3" + + //Test the agg model by adding a value column + sql """ DROP TABLE IF EXISTS ${tbName1} """ + def initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `city` VARCHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + def initTableData = "insert into ${tbName1} values(123456789, 'Alice', 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00')," + + " (234567890, 'Bob', 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00')," + + " (345678901, 'Carol', 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00')," + + " (456789012, 'Dave', 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00')," + + " (567890123, 'Eve', 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00')," + + " (678901234, 'Frank', 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00')," + + " (789012345, 'Grace', 'Xian', 29, 0, 13333333333, 'No. 
222 Street, Xian', '2022-07-07 22:00:00');" + + //Test the agg model by adding a value column with VARCHAR + sql initTable + sql initTableData + def getTableStatusSql = " SHOW ALTER TABLE COLUMN WHERE IndexName='${tbName1}' ORDER BY createtime DESC LIMIT 1 " + def errorMessage = "" + def insertSql = "insert into ${tbName1} values(923456689, 'Alice', '四川省', 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00');" + + + /** + * Test the agg model by modify a value type from MAP to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DECIMAL(38,10) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` CHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from MAP to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', false, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from MAP to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 1, '{\"k1\":\"v1\", \"k2\": 200}'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from MAP to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 3, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from MAP to INT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4.1, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 23, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from MAP to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 4564, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from MAP to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.36, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 43643734, [\"abc\", \"def\"]); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from MAP to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', 5.6, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from MAP to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 895.666, '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from MAP to DATE + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', '2003-12-31', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from MAP to DATEV2 + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', '2003-12-31', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from MAP to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 9.63, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', '2003-12-31 20:12:12', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from MAP to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', '2003-12-31 20:12:12', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from MAP to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', 'sdfghjk', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from MAP to STRING + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.59, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', 'wertyu', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from MAP to JSON + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column m JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', '{'a': 100, 'b': 200}', '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + /** + * Test the agg model by modify a value type from JSON to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DECIMAL(38,10) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` CHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]')," + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]')," + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]')," + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]')," + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]')," + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]')," + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 
222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]');" + + //TODO Test the agg model by modify a value type from JSON to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change JSON to BOOLEAN" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', , false); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from JSON to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change JSON to TINYINT" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 1); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from JSON to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change JSON to SMALLINT" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 21); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from JSON to INT + errorMessage = "errCode = 2, detailMessage = Can not change JSON to INT" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4.1, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 25); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from JSON to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change JSON to BIGINT" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 32523); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from JSON to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change JSON to LARGEINT" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.36, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 356436); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from JSON to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change JSON to FLOAT" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 86.5); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from JSON to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change JSON to DECIMAL128" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 896.2356); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from JSON to DATE + errorMessage = "errCode = 2, detailMessage = Can not change JSON to DATEV2" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '2003-12-31'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from JSON to DATEV2 + errorMessage = "errCode = 2, detailMessage = Can not change JSON to DATEV2" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '2003-12-31'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from JSON to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change JSON to DATETIMEV2" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 9.63, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '2003-12-31 20:12:12'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from JSON to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change JSON to DATETIMEV2" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '2003-12-31 20:12:12'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from JSON to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change JSON to VARCHAR" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, 'erwtewxa'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from JSON to STRING + errorMessage = "errCode = 2, detailMessage = Can not change JSON to STRING" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.59, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '36tgeryda'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from JSON to MAP + errorMessage = "errCode = 2" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column j Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, {'a': 700, 'b': 200}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + /** + * Test the agg model by modify a value type from array to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DECIMAL(38,10) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` CHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `array` ARRAY REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `STRUCT` STRUCT REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'});" + + //TODO Test the agg model by modify a value type from array to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', false, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from ARRAY to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 1, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from ARRAY to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 21, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from ARRAY to INT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4.1, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 25, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from ARRAY to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 32454, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from ARRAY to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.36, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 34235, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from ARRAY to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 45,8, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from ARRAY to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 677.908, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from ARRAY to DATE + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', '2023-10-23', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from ARRAY to DATEV2 + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', '2023-10-23', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from ARRAY to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 9.63, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', '2023-10-23 15:00:26', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from ARRAY to DATETIMEV2 + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', '2023-10-26 15:54:21', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from ARRAY to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 'wrwertew', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from ARRAY to STRING + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.59, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', 'eterytergfds', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from ARRAY to JSON + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', {'a': 700, 'b':500}, {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from ARRAY to MAP + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', '[\"abc\", \"def\"]', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from ARRAY to JSON + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column array STRUCT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', '[\"abc\", \"def\"]', {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + /** + * Test the agg model by modify a value type from STRUCT to other type + */ + sql """ DROP TABLE IF EXISTS ${tbName1} """ + initTable = " CREATE TABLE IF NOT EXISTS ${tbName1}\n" + + " (\n" + + " `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" + + " `username` VARCHAR(50) NOT NULL COMMENT \"用户昵称\",\n" + + " `score` DECIMAL(38,10) REPLACE_IF_NOT_NULL COMMENT \"分数\",\n" + + " `city` CHAR(20) REPLACE_IF_NOT_NULL COMMENT \"用户所在城市\",\n" + + " `age` SMALLINT REPLACE_IF_NOT_NULL COMMENT \"用户年龄\",\n" + + " `sex` TINYINT REPLACE_IF_NOT_NULL COMMENT \"用户性别\",\n" + + " `phone` LARGEINT REPLACE_IF_NOT_NULL COMMENT \"用户电话\",\n" + + " `address` VARCHAR(500) REPLACE_IF_NOT_NULL COMMENT \"用户地址\",\n" + + " `register_time` DATETIME REPLACE_IF_NOT_NULL COMMENT \"用户注册时间\",\n" + + " `m` Map REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `j` JSON REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `array` ARRAY REPLACE_IF_NOT_NULL COMMENT \"\",\n" + + " `STRUCT` STRUCT REPLACE_IF_NOT_NULL COMMENT \"\"\n" + + " )\n" + + " AGGREGATE KEY(`user_id`, `username`)\n" + + " DISTRIBUTED BY HASH(`user_id`) BUCKETS 1\n" + + " PROPERTIES (\n" + + " \"replication_allocation\" = \"tag.location.default: 1\"\n" + + " );" + + initTableData = "insert into ${tbName1} values(123456789, 'Alice', 1.83, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 100, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (234567890, 'Bob', 1.89, 'Shanghai', 30, 1, 13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00', {'a': 200, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (345678901, 'Carol', 2.6689, 'Guangzhou', 28, 0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00', {'a': 300, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (456789012, 'Dave', 3.9456, 'Shenzhen', 35, 1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00', {'a': 400, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (567890123, 'Eve', 4.223, 'Chengdu', 27, 0, 13572468091, 'No. 654 Street, Chengdu', '2022-05-05 18:00:00', {'a': 500, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (678901234, 'Frank', 2.5454, 'Hangzhou', 32, 1, 13467985213, 'No. 321 Street, Hangzhou', '2022-06-06 20:00:00', {'a': 600, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}), " + + " (789012345, 'Grace', 2.19656, 'Xian', 29, 0, 13333333333, 'No. 
222 Street, Xian', '2022-07-07 22:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'});" + + //TODO Test the agg model by modify a value type from STRUCT to BOOLEAN + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT BOOLEAN REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.2, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], false); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + // TODO Test the agg model by modify a value type from STRUCT to TINYINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT TINYINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.2, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 1); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from STRUCT to SMALLINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT SMALLINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 3, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 21); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from STRUCT to INT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT INT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 4.1, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 21); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from STRUCT to BIGINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT BIGINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 32454); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //Test the agg model by modify a value type from STRUCT to LARGEINT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT LARGEINT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 2.36, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 34235); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from STRUCT to FLOAT + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT FLOAT REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 45.5); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //TODO Test the agg model by modify a value type from STRUCT to DECIMAL + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT DECIMAL(38,0) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 1.23, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 677.908); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from STRUCT to DATE + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT DATE REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.6, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], '2023-10-23'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from STRUCT to DATEV2 + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT DATEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.3, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], '2023-10-23'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //TODO Test the agg model by modify a value type from STRUCT to DATETIME + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT DATETIME REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 9.63, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], '2023-10-23 15:00:26'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + //TODO Test the agg model by modify a value type from STRUCT to DATETIMEV2 + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT DATETIMEV2 REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], '2023-10-26 15:54:21'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + + }, errorMessage) + + + //Test the agg model by modify a value type from STRUCT to VARCHAR + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT VARCHAR(100) REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 5.69, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 'ertet'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from STRUCT to STRING + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT STRING REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 6.59, 'Yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], 'wrwerew'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, false, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from STRUCT to JSON + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT JSON REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 
123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\\\"abc\\\", \\\"def\\\"]', [6,7,8], {'a': 700, 'b': 200}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + //Test the agg model by modify a value type from STRUCT to MAP + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT Map REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], '[\"abc\", \"def\"]'); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + + + //Test the agg model by modify a value type from STRUCT to ARRAY + errorMessage = "errCode = 2, detailMessage = Can not change" + expectExceptionLike({ + sql initTable + sql initTableData + sql """ alter table ${tbName1} MODIFY column STRUCT ARRAY REPLACE_IF_NOT_NULL """ + insertSql = "insert into ${tbName1} values(923456689, 'Alice', 8.47, 'yaan', 25, 0, 13812345678, 'No. 123 Street, Beijing', '2022-01-01 10:00:00', {'a': 700, 'b': 200}, '[\"abc\", \"def\"]', [6,7,8], {1, 'sn1', 'sa1'}); " + waitForSchemaChangeDone({ + sql getTableStatusSql + time 600 + }, insertSql, true, "${tbName1}") + }, errorMessage) + +} diff --git a/regression-test/suites/show_p0/test_show_create_table_and_views.groovy b/regression-test/suites/show_p0/test_show_create_table_and_views.groovy deleted file mode 100644 index 8b087adc4ab922..00000000000000 --- a/regression-test/suites/show_p0/test_show_create_table_and_views.groovy +++ /dev/null @@ -1,150 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -import org.apache.doris.regression.util.JdbcUtils - -suite("test_show_create_table_and_views", "show") { - sql "SET enable_nereids_planner=false;" - - def shouldNotShowHiddenColumnsAndCreateWithHiddenColumns = { - connect { - multi_sql """ - SET enable_nereids_planner=false; - drop table if exists test_show_create_table_no_hidden_column; - create table test_show_create_table_no_hidden_column(id int, name varchar(50)) unique key(id) distributed by hash(id) properties('replication_num'='1'); - set show_hidden_columns=true; - """ - - def result = JdbcUtils.executeToMapArray(context.getConnection(), "show create table test_show_create_table_no_hidden_column") - assertTrue(!result[0].get("Create Table").toString().contains("__DORIS_DELETE_SIGN__")) - - test { - sql "create table table_with_hidden_sign(id int, __DORIS_DELETE_SIGN__ int) distributed by hash(id) properties('replication_num'='1')" - exception "Disable to create table column with name start with __DORIS_: " - } - } - }() - - def ret = sql "SHOW FRONTEND CONFIG like '%enable_feature_binlog%';" - logger.info("${ret}") - if (ret.size() != 0 && ret[0].size() > 1 && ret[0][1] == 'false') { - logger.info("enable_feature_binlog=false in frontend config, no need to run this case.") - return - } - - String suiteName = "show_create_table_and_views" - String dbName = "${suiteName}_db" - String tableName = "${suiteName}_table" - String viewName = "${suiteName}_view" - String rollupName = "${suiteName}_rollup" - String likeName = "${suiteName}_like" - - sql "CREATE DATABASE IF NOT EXISTS ${dbName}" - sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" - sql """ - CREATE TABLE ${dbName}.${tableName} ( - `user_id` LARGEINT NOT NULL, - `good_id` LARGEINT NOT NULL, - `cost` BIGINT SUM DEFAULT "0", - INDEX index_user_id (`user_id`) USING INVERTED COMMENT 'test index comment', - INDEX index_good_id (`good_id`) USING INVERTED COMMENT 'test index" comment' - ) - AGGREGATE KEY(`user_id`, `good_id`) - PARTITION BY RANGE(`good_id`) - ( - PARTITION p1 VALUES LESS THAN ("100"), - PARTITION p2 VALUES LESS THAN ("200"), - PARTITION p3 VALUES LESS THAN ("300"), - PARTITION p4 VALUES LESS THAN ("400"), - PARTITION p5 VALUES LESS THAN ("500"), - PARTITION p6 VALUES LESS THAN ("600"), - PARTITION p7 VALUES LESS THAN MAXVALUE - ) - DISTRIBUTED BY HASH(`user_id`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1" - ) - """ - - sql """INSERT INTO ${dbName}.${tableName} VALUES - (1, 1, 10), - (1, 1, 20), - (1, 2, 5), - (1, 3, 10), - (2, 1, 0), - (2, 1, 100), - (3, 1, 10), - (2, 2, 10), - (2, 3, 44), - (3, 2, 1), - (100, 100, 1), - (200, 20, 1), - (300, 20, 1), - (1, 300, 2), - (2, 200, 1111), - (23, 900, 1)""" - - qt_show "SHOW CREATE TABLE ${dbName}.${tableName}" - qt_select "SELECT * FROM ${dbName}.${tableName} ORDER BY user_id, good_id" - - sql "drop view if exists ${dbName}.${viewName};" - // create view and show - sql """ - CREATE VIEW IF NOT EXISTS ${dbName}.${viewName} (user_id, cost) - AS - SELECT user_id, cost FROM ${dbName}.${tableName} - WHERE good_id = 2 - """ - qt_select "SELECT * FROM ${dbName}.${viewName} ORDER BY user_id" - qt_show "SHOW CREATE VIEW ${dbName}.${viewName}" - - // create rollup - sql """ALTER TABLE ${dbName}.${tableName} - ADD ROLLUP ${rollupName} (user_id, cost) - """ - - def isAlterTableFinish = { -> - def records = sql """SHOW ALTER TABLE ROLLUP FROM ${dbName}""" - for (def row in records) { - if (row[5] == "${rollupName}" && row[8] == "FINISHED") { - return true - } - } - false - } - while (!isAlterTableFinish()) { - 
Thread.sleep(100) - } - - qt_select "SELECT user_id, SUM(cost) FROM ${dbName}.${tableName} GROUP BY user_id ORDER BY user_id" - qt_show "SHOW CREATE TABLE ${dbName}.${tableName}" - - // create like - sql "CREATE TABLE ${dbName}.${likeName} LIKE ${dbName}.${tableName}" - qt_show "SHOW CREATE TABLE ${dbName}.${likeName}" - - // create like with rollup - sql "CREATE TABLE ${dbName}.${likeName}_with_rollup LIKE ${dbName}.${tableName} WITH ROLLUP" - qt_show "SHOW CREATE TABLE ${dbName}.${likeName}_with_rollup" - - sql "DROP TABLE IF EXISTS ${dbName}.${likeName}_with_rollup FORCE" - sql "DROP TABLE ${dbName}.${likeName} FORCE" - sql "DROP VIEW ${dbName}.${viewName}" - sql "DROP TABLE ${dbName}.${tableName} FORCE" - sql "DROP DATABASE ${dbName} FORCE" -} - diff --git a/regression-test/suites/statistics/test_analyze_mtmv.groovy b/regression-test/suites/statistics/test_analyze_mtmv.groovy index 9825ba455c7fe6..49e1b75a476dbb 100644 --- a/regression-test/suites/statistics/test_analyze_mtmv.groovy +++ b/regression-test/suites/statistics/test_analyze_mtmv.groovy @@ -270,13 +270,20 @@ suite("test_analyze_mtmv") { l_suppkey; """ sql """REFRESH MATERIALIZED VIEW mv1 AUTO""" - while(true) { + boolean refreshed = false; + for (int i = 0; i < 300; i++) { Thread.sleep(1000) def result = sql """select * from mv_infos("database"="test_analyze_mtmv") where Name="mv1";""" + logger.info("refresh mv info:" + result) if (result[0][5] == "SUCCESS") { + refreshed = true; break; } + if (result[0][5] == "FAIL") { + throw new Exception("Refresh mv failed.") + } } + assertTrue(refreshed) def dup_sql1 = """select * from mv1 order by l_shipdate;""" qt_sql1 dup_sql1 diff --git a/regression-test/suites/statistics/test_analyze_mv.groovy b/regression-test/suites/statistics/test_analyze_mv.groovy index d6ee9b4cfc7c14..2d7b4aaea7d462 100644 --- a/regression-test/suites/statistics/test_analyze_mv.groovy +++ b/regression-test/suites/statistics/test_analyze_mv.groovy @@ -112,6 +112,7 @@ suite("test_analyze_mv") { sql """create database test_analyze_mv""" sql """use test_analyze_mv""" sql """set global force_sample_analyze=false""" + sql """set global enable_auto_analyze=false""" sql """CREATE TABLE mvTestDup ( key1 bigint NOT NULL, diff --git a/regression-test/suites/statistics/test_drop_stats_and_truncate.groovy b/regression-test/suites/statistics/test_drop_stats_and_truncate.groovy new file mode 100644 index 00000000000000..6dc3c6d179741a --- /dev/null +++ b/regression-test/suites/statistics/test_drop_stats_and_truncate.groovy @@ -0,0 +1,228 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
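+// Covers the interaction between `drop stats`, `truncate table`, and partition-level
+// truncate on a non-partitioned and a range-partitioned table, asserting how many
+// analyzed columns remain visible in `show column stats`, `show column cached stats`,
+// and the column list field of `show table stats` after each operation.
+// A hypothetical helper (sketch only, not used by this suite) for the repeated
+// "count analyzed columns" pattern below could look like:
+//     def analyzedColumnCount = { tableName ->
+//         def rows = sql """show table stats ${tableName}"""
+//         def cols = rows[0][4]    // comma-separated list of analyzed columns
+//         return cols ? cols.split(",").size() : 0
+//     }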
+ +suite("test_drop_stats_and_truncate") { + + sql """drop database if exists test_drop_stats_and_truncate""" + sql """create database test_drop_stats_and_truncate""" + sql """use test_drop_stats_and_truncate""" + sql """set global force_sample_analyze=false""" + sql """set global enable_auto_analyze=false""" + + sql """CREATE TABLE non_part ( + r_regionkey int NOT NULL, + r_name VARCHAR(25) NOT NULL, + r_comment VARCHAR(152) + )ENGINE=OLAP + DUPLICATE KEY(`r_regionkey`) + COMMENT "OLAP" + DISTRIBUTED BY HASH(`r_regionkey`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ); + """ + sql """CREATE TABLE `part` ( + `id` INT NULL, + `colint` INT NULL, + `coltinyint` tinyint NULL, + `colsmallint` smallINT NULL, + `colbigint` bigINT NULL, + `collargeint` largeINT NULL, + `colfloat` float NULL, + `coldouble` double NULL, + `coldecimal` decimal(27, 9) NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + COMMENT 'OLAP' + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES [("-2147483648"), ("10000")), + PARTITION p2 VALUES [("10000"), ("20000")), + PARTITION p3 VALUES [("20000"), ("30000")) + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 3 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql """insert into non_part values (1, "1", "1");""" + sql """analyze table non_part with sync""" + + def result = sql """show column cached stats non_part""" + assertEquals(3, result.size()) + result = sql """show column stats non_part""" + assertEquals(3, result.size()) + result = sql """show table stats non_part""" + def all_columns = result[0][4] + String[] columns = all_columns.split(","); + assertEquals(3, columns.size()) + + sql """drop stats non_part(r_comment)""" + result = sql """show column cached stats non_part""" + assertEquals(2, result.size()) + result = sql """show column stats non_part""" + assertEquals(2, result.size()) + result = sql """show table stats non_part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(2, columns.size()) + + sql """drop stats non_part""" + result = sql """show column cached stats non_part""" + assertEquals(0, result.size()) + result = sql """show column stats non_part""" + assertEquals(0, result.size()) + result = sql """show table stats non_part""" + all_columns = result[0][4] + assertEquals("", all_columns) + + sql """analyze table non_part with sync""" + result = sql """show column cached stats non_part""" + assertEquals(3, result.size()) + result = sql """show column stats non_part""" + assertEquals(3, result.size()) + result = sql """show table stats non_part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(3, columns.size()) + + sql """truncate table non_part""" + result = sql """show column stats non_part""" + assertEquals(0, result.size()) + result = sql """show table stats non_part""" + all_columns = result[0][4] + assertEquals("", all_columns) + + sql """Insert into part values (1, 1, 1, 1, 1, 1, 1.1, 1.1, 1.1), (2, 2, 2, 2, 2, 2, 2.2, 2.2, 2.2), (3, 3, 3, 3, 3, 3, 3.3, 3.3, 3.3),(4, 4, 4, 4, 4, 4, 4.4, 4.4, 4.4),(5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5),(6, 6, 6, 6, 6, 6, 6.6, 6.6, 6.6),(10001, 10001, 10001, 10001, 10001, 10001, 10001.10001, 10001.10001, 10001.10001),(10002, 10002, 10002, 10002, 10002, 10002, 10002.10002, 10002.10002, 10002.10002),(10003, 10003, 10003, 10003, 10003, 10003, 10003.10003, 10003.10003, 10003.10003),(10004, 10004, 10004, 10004, 10004, 10004, 10004.10004, 10004.10004, 10004.10004),(10005, 10005, 10005, 10005, 10005, 10005, 10005.10005, 10005.10005, 
10005.10005),(10006, 10006, 10006, 10006, 10006, 10006, 10006.10006, 10006.10006, 10006.10006),(20001, 20001, 20001, 20001, 20001, 20001, 20001.20001, 20001.20001, 20001.20001),(20002, 20002, 20002, 20002, 20002, 20002, 20002.20002, 20002.20002, 20002.20002),(20003, 20003, 20003, 20003, 20003, 20003, 20003.20003, 20003.20003, 20003.20003),(20004, 20004, 20004, 20004, 20004, 20004, 20004.20004, 20004.20004, 20004.20004),(20005, 20005, 20005, 20005, 20005, 20005, 20005.20005, 20005.20005, 20005.20005),(20006, 20006, 20006, 20006, 20006, 20006, 20006.20006, 20006.20006, 20006.20006)""" + sql """analyze table part with sync""" + result = sql """show column cached stats part""" + assertEquals(9, result.size()) + result = sql """show column stats part""" + assertEquals(9, result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(9, columns.size()) + + sql """drop stats part(colint)""" + result = sql """show column cached stats part""" + assertEquals(8, result.size()) + result = sql """show column stats part""" + assertEquals(8, result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(8, columns.size()) + + sql """drop stats part""" + result = sql """show column cached stats part""" + assertEquals(0, result.size()) + result = sql """show column stats part""" + assertEquals(0, result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + assertEquals("", all_columns) + + sql """analyze table part with sync""" + result = sql """show column cached stats part""" + assertEquals(9, result.size()) + result = sql """show column stats part""" + assertEquals(9, result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(9, columns.size()) + + sql """truncate table part""" + result = sql """show column stats part""" + assertEquals(0, result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + assertEquals("", all_columns) + + sql """Insert into part values (1, 1, 1, 1, 1, 1, 1.1, 1.1, 1.1), (2, 2, 2, 2, 2, 2, 2.2, 2.2, 2.2), (3, 3, 3, 3, 3, 3, 3.3, 3.3, 3.3),(4, 4, 4, 4, 4, 4, 4.4, 4.4, 4.4),(5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5),(6, 6, 6, 6, 6, 6, 6.6, 6.6, 6.6),(10001, 10001, 10001, 10001, 10001, 10001, 10001.10001, 10001.10001, 10001.10001),(10002, 10002, 10002, 10002, 10002, 10002, 10002.10002, 10002.10002, 10002.10002),(10003, 10003, 10003, 10003, 10003, 10003, 10003.10003, 10003.10003, 10003.10003),(10004, 10004, 10004, 10004, 10004, 10004, 10004.10004, 10004.10004, 10004.10004),(10005, 10005, 10005, 10005, 10005, 10005, 10005.10005, 10005.10005, 10005.10005),(10006, 10006, 10006, 10006, 10006, 10006, 10006.10006, 10006.10006, 10006.10006),(20001, 20001, 20001, 20001, 20001, 20001, 20001.20001, 20001.20001, 20001.20001),(20002, 20002, 20002, 20002, 20002, 20002, 20002.20002, 20002.20002, 20002.20002),(20003, 20003, 20003, 20003, 20003, 20003, 20003.20003, 20003.20003, 20003.20003),(20004, 20004, 20004, 20004, 20004, 20004, 20004.20004, 20004.20004, 20004.20004),(20005, 20005, 20005, 20005, 20005, 20005, 20005.20005, 20005.20005, 20005.20005),(20006, 20006, 20006, 20006, 20006, 20006, 20006.20006, 20006.20006, 20006.20006)""" + sql """analyze table part with sync""" + result = sql """show column cached stats part""" + assertEquals(9, result.size()) + result = sql """show column stats part""" + assertEquals(9, 
result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(9, columns.size()) + + sql """truncate table part partition(p1)""" + result = sql """show column cached stats part""" + assertEquals(9, result.size()) + result = sql """show column stats part""" + assertEquals(9, result.size()) + result = sql """show table stats part""" + all_columns = result[0][4] + columns = all_columns.split(","); + assertEquals(9, columns.size()) + + sql """drop table part""" + sql """CREATE TABLE `part` ( + `id` INT NULL, + `colint` INT NULL, + `coltinyint` tinyint NULL, + `colsmallint` smallINT NULL, + `colbigint` bigINT NULL, + `collargeint` largeINT NULL, + `colfloat` float NULL, + `coldouble` double NULL, + `coldecimal` decimal(27, 9) NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + COMMENT 'OLAP' + PARTITION BY RANGE(`id`) + ( + PARTITION p1 VALUES [("-2147483648"), ("10000")), + PARTITION p2 VALUES [("10000"), ("20000")), + PARTITION p3 VALUES [("20000"), ("30000")) + ) + DISTRIBUTED BY HASH(`id`) BUCKETS 3 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql """analyze table part with sync""" + sql """Insert into part values (1, 1, 1, 1, 1, 1, 1.1, 1.1, 1.1)""" + result = sql """show table stats part""" + assertEquals("true", result[0][6]) + sql """truncate table part partition(p1)""" + result = sql """show table stats part""" + assertEquals("true", result[0][6]) + sql """analyze table part with sample rows 100 with sync""" + result = sql """show table stats part""" + if (result[0][6].equals("true")) { + result = sql """show index stats part part""" + logger.info("Report not ready. index stats: " + result) + sql """analyze table part with sample rows 100 with sync""" + result = sql """show table stats part""" + } + if (result[0][6].equals("true")) { + result = sql """show index stats part part""" + logger.info("Report not ready. index stats: " + result) + sql """analyze table part with sample rows 100 with sync""" + result = sql """show table stats part""" + } + assertEquals("false", result[0][6]) + + sql """drop database if exists test_drop_stats_and_truncate""" +} + diff --git a/regression-test/suites/statistics/test_partition_stats.groovy b/regression-test/suites/statistics/test_partition_stats.groovy index 16cedb9889d82d..96be61296eadf0 100644 --- a/regression-test/suites/statistics/test_partition_stats.groovy +++ b/regression-test/suites/statistics/test_partition_stats.groovy @@ -14,6 +14,8 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +import org.awaitility.Awaitility +import static java.util.concurrent.TimeUnit.SECONDS suite("test_partition_stats") { @@ -36,33 +38,29 @@ suite("test_partition_stats") { sql """use ${db}""" result = sql """show frontends;""" logger.info("show frontends result master: " + result) - for (int i = 0; i < 120; i++) { - Thread.sleep(5000) + // If the condition is not met within 300 seconds, Awaitility raises a timeout. + Awaitility.await().atMost(300, SECONDS).pollInterval(5, SECONDS).until{ result = sql """SHOW DATA FROM ${table};""" logger.info("result " + result) if (result[row][column] == expected) { - return; + return true; } + return false; } - throw new Exception("Row count report timeout.") } } def wait_mv_finish = { db, table -> - for (int loop = 0; loop < 300; loop++) { - Thread.sleep(1000) - boolean finished = true; + // If the condition is not met within 300 seconds, Awaitility raises a timeout. 
+ Awaitility.await().atMost(300, SECONDS).pollInterval(1, SECONDS).until{ def result = sql """SHOW ALTER TABLE MATERIALIZED VIEW FROM ${db} WHERE tableName="${table}";""" for (int i = 0; i < result.size(); i++) { if (result[i][8] != 'FINISHED') { - finished = false; - break; + return false; } } - if (finished) { - return; - } + return true; } throw new Exception("Wait mv finish timeout.") } @@ -116,15 +114,17 @@ suite("test_partition_stats") { // Test show cached partition stats. sql """analyze table part with sync;""" - for (int i = 0; i < 20; i++) { + // If the condition is not met within 20 seconds, Awaitility raises a timeout. + Awaitility.await().atMost(20, SECONDS).pollInterval(1, SECONDS).until{ result = sql """show column cached stats part partition(*)""" if (result.size() == 27) { logger.info("cache is ready.") - break; + return true; } logger.info("cache is not ready yet.") - Thread.sleep(1000) + return false; } + result = sql """show column cached stats part(id) partition(p1)""" assertEquals("id", result[0][0]) assertEquals("p1", result[0][1]) @@ -479,13 +479,12 @@ suite("test_partition_stats") { result = sql """show auto analyze part4""" assertTrue(result.size() > 0) def index = result.size() - 1; - def finished = false; - for (int i = 0; i < 20; i++) { + // If the condition is not met within 30 seconds, Awaitility raises a timeout. + Awaitility.await().atMost(30, SECONDS).pollInterval(1, SECONDS).until{ if (result[index][9].equals("FINISHED")) { - finished = true; - break; + return true; } - Thread.sleep(1000) + return false; } if (finished) { result = sql """show column stats part4""" @@ -667,13 +666,12 @@ suite("test_partition_stats") { sql """analyze table part7 properties("use.auto.analyzer"="true")""" result = sql """show auto analyze part7""" assertEquals(1, result.size()) - def finished = false; - for (int i = 0; i < 20; i++) { + // If the condition is not met within 20 seconds, Awaitility raises a timeout. 
+ Awaitility.await().atMost(20, SECONDS).pollInterval(1, SECONDS).until{ if (result[0][9].equals("FINISHED")) { - finished = true; - break; + return true; } - Thread.sleep(1000) + return false; } if (finished) { result = sql """show column stats part7""" diff --git a/regression-test/suites/table_p0/test_create_table_if_not_exists_as_select.groovy b/regression-test/suites/table_p0/test_create_table_if_not_exists_as_select.groovy index 0cd5cf335c4e1f..35d37757b805d1 100644 --- a/regression-test/suites/table_p0/test_create_table_if_not_exists_as_select.groovy +++ b/regression-test/suites/table_p0/test_create_table_if_not_exists_as_select.groovy @@ -75,9 +75,6 @@ suite("test_create_table_if_not_exists_as_select") { """ def secondExecuteCount = sql """select count(*) from ${table_name}""" assertEquals(1, secondExecuteCount[0][0]); - sql """ - SET enable_nereids_planner=false; - """ sql """drop table if exists `${table_name}`""" sql """ create table if not exists ${table_name} PROPERTIES("replication_num"="1") as select * from ${base_table_name} diff --git a/regression-test/suites/tpch_sf100_p2/ddl/customer.sql b/regression-test/suites/tpch_sf100_p2/ddl/customer.sql index 5f41d6dbb12496..32665f2ce6a190 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/customer.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/customer.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS customer ( DUPLICATE KEY(C_CUSTKEY, C_NAME) DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_p2/ddl/lineitem.sql b/regression-test/suites/tpch_sf100_p2/ddl/lineitem.sql index 34fce81b607e0f..eebd825dc8af7c 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/lineitem.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/lineitem.sql @@ -19,6 +19,7 @@ CREATE TABLE IF NOT EXISTS lineitem ( DUPLICATE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 32 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_p2/ddl/nation.sql b/regression-test/suites/tpch_sf100_p2/ddl/nation.sql index 56c133ce1eaf46..1e11fa86ea96dd 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/nation.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/nation.sql @@ -7,6 +7,7 @@ CREATE TABLE IF NOT EXISTS nation ( DUPLICATE KEY(N_NATIONKEY, N_NAME) DISTRIBUTED BY HASH(N_NATIONKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_p2/ddl/orders.sql b/regression-test/suites/tpch_sf100_p2/ddl/orders.sql index 1843ef0f2a5c95..a02695d51779e3 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/orders.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/orders.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS orders ( DUPLICATE KEY(O_ORDERKEY, O_CUSTKEY) DISTRIBUTED BY HASH(O_ORDERKEY) BUCKETS 32 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_p2/ddl/part.sql b/regression-test/suites/tpch_sf100_p2/ddl/part.sql index f5e75cef3f03d3..91de8dfa43b4c8 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/part.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/part.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS part ( DUPLICATE KEY(P_PARTKEY, P_NAME) DISTRIBUTED BY HASH(P_PARTKEY) BUCKETS 32 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git 
a/regression-test/suites/tpch_sf100_p2/ddl/partsupp.sql b/regression-test/suites/tpch_sf100_p2/ddl/partsupp.sql index 72263631c58c7b..ffb686f3716a18 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/partsupp.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/partsupp.sql @@ -8,6 +8,7 @@ CREATE TABLE IF NOT EXISTS partsupp ( DUPLICATE KEY(PS_PARTKEY, PS_SUPPKEY) DISTRIBUTED BY HASH(PS_PARTKEY) BUCKETS 32 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_p2/ddl/region.sql b/regression-test/suites/tpch_sf100_p2/ddl/region.sql index fc94a17d46b890..d4170a03432594 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/region.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/region.sql @@ -6,6 +6,7 @@ CREATE TABLE IF NOT EXISTS region ( DUPLICATE KEY(R_REGIONKEY, R_NAME) DISTRIBUTED BY HASH(R_REGIONKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_p2/ddl/supplier.sql b/regression-test/suites/tpch_sf100_p2/ddl/supplier.sql index 156d5c9f3565a5..c09e59e72f24bd 100644 --- a/regression-test/suites/tpch_sf100_p2/ddl/supplier.sql +++ b/regression-test/suites/tpch_sf100_p2/ddl/supplier.sql @@ -10,5 +10,6 @@ CREATE TABLE IF NOT EXISTS supplier ( DUPLICATE KEY(S_SUPPKEY, S_NAME) DISTRIBUTED BY HASH(S_SUPPKEY) BUCKETS 32 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/customer.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/customer.sql index 49ea3f7a0f8527..5b63e71429d8fb 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/customer.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/customer.sql @@ -12,6 +12,7 @@ UNIQUE KEY(`c_custkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/customer_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/customer_sequence.sql index d916c1b2615522..0936506fd957ec 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/customer_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/customer_sequence.sql @@ -12,6 +12,7 @@ UNIQUE KEY(`c_custkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem.sql index ff4229548204d0..3d5fe214710f43 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem.sql @@ -20,6 +20,7 @@ UNIQUE KEY(`l_shipdate`, `l_orderkey`,`l_linenumber`,`l_partkey`,`l_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem_sequence.sql index 00d03fa2059f31..18a4882fc9d182 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/lineitem_sequence.sql @@ -20,6 +20,7 @@ UNIQUE KEY(`l_shipdate`, `l_orderkey`,`l_linenumber`,`l_partkey`,`l_suppkey`) COMMENT "OLAP" DISTRIBUTED 
BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'DATE', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/nation.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/nation.sql index a6421ac9b17d24..3581a266099cf7 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/nation.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/nation.sql @@ -8,6 +8,7 @@ UNIQUE KEY(`N_NATIONKEY`) COMMENT "OLAP" DISTRIBUTED BY HASH(`N_NATIONKEY`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ); diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/nation_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/nation_sequence.sql index fab3660a2b5a3d..f1f67a8872a325 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/nation_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/nation_sequence.sql @@ -8,6 +8,7 @@ UNIQUE KEY(`N_NATIONKEY`) COMMENT "OLAP" DISTRIBUTED BY HASH(`N_NATIONKEY`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ); diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/orders.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/orders.sql index 2d2f1d8db7d5c6..57a54f2b329241 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/orders.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/orders.sql @@ -13,5 +13,6 @@ UNIQUE KEY(`o_orderkey`, `o_orderdate`) COMMENT "OLAP" DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/orders_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/orders_sequence.sql index 4645ed0cc9414f..0c96bf845fdc28 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/orders_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/orders_sequence.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`o_orderkey`, `o_orderdate`) COMMENT "OLAP" DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'bigint', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/part.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/part.sql index 190a0019609b24..c3e5014e709e60 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/part.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/part.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`p_partkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/part_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/part_sequence.sql index 4017225439033e..fe85dd84d30ee8 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/part_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/part_sequence.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`p_partkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp.sql index 00c15535e834dd..fd83aec75cf026 100644 --- 
a/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp.sql @@ -9,6 +9,7 @@ UNIQUE KEY(`ps_partkey`,`ps_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`ps_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp_sequence.sql index 63f8c72268e8f1..37c3658112adff 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/partsupp_sequence.sql @@ -9,6 +9,7 @@ UNIQUE KEY(`ps_partkey`,`ps_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`ps_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/region.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/region.sql index 66eb7dd77e3ddf..dab7e8b7cd4139 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/region.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/region.sql @@ -7,6 +7,7 @@ UNIQUE KEY(`r_regionkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`r_regionkey`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/region_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/region_sequence.sql index c8f1e8812b829d..11ad6dc1853680 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/region_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/region_sequence.sql @@ -7,6 +7,7 @@ UNIQUE KEY(`r_regionkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`r_regionkey`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier.sql index e7c345d9346624..e31d9332de8df6 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier.sql @@ -11,5 +11,6 @@ UNIQUE KEY(`s_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 12 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier_sequence.sql b/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier_sequence.sql index f7c36adafb9976..50ee24a118a16b 100644 --- a/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier_sequence.sql +++ b/regression-test/suites/tpch_sf100_unique_p2/ddl/supplier_sequence.sql @@ -11,6 +11,7 @@ UNIQUE KEY(`s_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 12 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/customer.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/customer.sql index 49ea3f7a0f8527..5b63e71429d8fb 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/customer.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/customer.sql @@ -12,6 +12,7 @@ UNIQUE KEY(`c_custkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git 
a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/lineitem.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/lineitem.sql index ff4229548204d0..3d5fe214710f43 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/lineitem.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/lineitem.sql @@ -20,6 +20,7 @@ UNIQUE KEY(`l_shipdate`, `l_orderkey`,`l_linenumber`,`l_partkey`,`l_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/nation.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/nation.sql index a6421ac9b17d24..3581a266099cf7 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/nation.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/nation.sql @@ -8,6 +8,7 @@ UNIQUE KEY(`N_NATIONKEY`) COMMENT "OLAP" DISTRIBUTED BY HASH(`N_NATIONKEY`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ); diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/orders.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/orders.sql index 2d2f1d8db7d5c6..57a54f2b329241 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/orders.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/orders.sql @@ -13,5 +13,6 @@ UNIQUE KEY(`o_orderkey`, `o_orderdate`) COMMENT "OLAP" DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/part.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/part.sql index 190a0019609b24..c3e5014e709e60 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/part.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/part.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`p_partkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/partsupp.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/partsupp.sql index 00c15535e834dd..fd83aec75cf026 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/partsupp.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/partsupp.sql @@ -9,6 +9,7 @@ UNIQUE KEY(`ps_partkey`,`ps_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`ps_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/region.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/region.sql index 66eb7dd77e3ddf..dab7e8b7cd4139 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/region.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/region.sql @@ -7,6 +7,7 @@ UNIQUE KEY(`r_regionkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`r_regionkey`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/supplier.sql b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/supplier.sql index e7c345d9346624..e31d9332de8df6 100644 --- a/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/supplier.sql +++ b/regression-test/suites/tpch_sf100_unique_sql_p2/ddl/supplier.sql @@ -11,5 +11,6 @@ UNIQUE KEY(`s_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 12 PROPERTIES ( + 
"enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/customer.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/customer.sql index 49ea3f7a0f8527..5b63e71429d8fb 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/customer.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/customer.sql @@ -12,6 +12,7 @@ UNIQUE KEY(`c_custkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/customer_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/customer_sequence.sql index d916c1b2615522..0936506fd957ec 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/customer_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/customer_sequence.sql @@ -12,6 +12,7 @@ UNIQUE KEY(`c_custkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem.sql index ff4229548204d0..3d5fe214710f43 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem.sql @@ -20,6 +20,7 @@ UNIQUE KEY(`l_shipdate`, `l_orderkey`,`l_linenumber`,`l_partkey`,`l_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem_sequence.sql index 00d03fa2059f31..18a4882fc9d182 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/lineitem_sequence.sql @@ -20,6 +20,7 @@ UNIQUE KEY(`l_shipdate`, `l_orderkey`,`l_linenumber`,`l_partkey`,`l_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'DATE', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/nation.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/nation.sql index a6421ac9b17d24..3581a266099cf7 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/nation.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/nation.sql @@ -8,6 +8,7 @@ UNIQUE KEY(`N_NATIONKEY`) COMMENT "OLAP" DISTRIBUTED BY HASH(`N_NATIONKEY`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ); diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/nation_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/nation_sequence.sql index fab3660a2b5a3d..f1f67a8872a325 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/nation_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/nation_sequence.sql @@ -8,6 +8,7 @@ UNIQUE KEY(`N_NATIONKEY`) COMMENT "OLAP" DISTRIBUTED BY HASH(`N_NATIONKEY`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ); diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/orders.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/orders.sql index 2d2f1d8db7d5c6..57a54f2b329241 100644 --- 
a/regression-test/suites/tpch_sf10_unique_p2/ddl/orders.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/orders.sql @@ -13,5 +13,6 @@ UNIQUE KEY(`o_orderkey`, `o_orderdate`) COMMENT "OLAP" DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/orders_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/orders_sequence.sql index 4645ed0cc9414f..0c96bf845fdc28 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/orders_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/orders_sequence.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`o_orderkey`, `o_orderdate`) COMMENT "OLAP" DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'bigint', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/part.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/part.sql index 190a0019609b24..c3e5014e709e60 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/part.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/part.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`p_partkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/part_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/part_sequence.sql index 4017225439033e..fe85dd84d30ee8 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/part_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/part_sequence.sql @@ -13,6 +13,7 @@ UNIQUE KEY(`p_partkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp.sql index 00c15535e834dd..fd83aec75cf026 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp.sql @@ -9,6 +9,7 @@ UNIQUE KEY(`ps_partkey`,`ps_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`ps_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp_sequence.sql index 63f8c72268e8f1..37c3658112adff 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/partsupp_sequence.sql @@ -9,6 +9,7 @@ UNIQUE KEY(`ps_partkey`,`ps_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`ps_partkey`) BUCKETS 24 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/region.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/region.sql index 66eb7dd77e3ddf..dab7e8b7cd4139 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/region.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/region.sql @@ -7,6 +7,7 @@ UNIQUE KEY(`r_regionkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`r_regionkey`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git 
a/regression-test/suites/tpch_sf10_unique_p2/ddl/region_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/region_sequence.sql index c8f1e8812b829d..11ad6dc1853680 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/region_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/region_sequence.sql @@ -7,6 +7,7 @@ UNIQUE KEY(`r_regionkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`r_regionkey`) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier.sql index e7c345d9346624..e31d9332de8df6 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier.sql @@ -11,5 +11,6 @@ UNIQUE KEY(`s_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 12 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier_sequence.sql b/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier_sequence.sql index f7c36adafb9976..50ee24a118a16b 100644 --- a/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier_sequence.sql +++ b/regression-test/suites/tpch_sf10_unique_p2/ddl/supplier_sequence.sql @@ -11,6 +11,7 @@ UNIQUE KEY(`s_suppkey`) COMMENT "OLAP" DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 12 PROPERTIES ( + "enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "replication_num" = "3" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/create_table.sql b/regression-test/suites/tpch_sf1_p2/ddl/create_table.sql index 324b167fc80e07..015058d6d89de7 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/create_table.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/create_table.sql @@ -6,5 +6,6 @@ CREATE TABLE IF NOT EXISTS gavin_test ( DUPLICATE KEY(id, name) DISTRIBUTED BY HASH(id) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/customer.sql b/regression-test/suites/tpch_sf1_p2/ddl/customer.sql index 00b1b98d48917d..0a98bb99ae2823 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/customer.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/customer.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS customer ( DUPLICATE KEY(C_CUSTKEY, C_NAME) DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/lineitem.sql b/regression-test/suites/tpch_sf1_p2/ddl/lineitem.sql index 2b4cb77143886d..30b29ee39c64c3 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/lineitem.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/lineitem.sql @@ -19,6 +19,7 @@ CREATE TABLE IF NOT EXISTS lineitem ( DUPLICATE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/nation.sql b/regression-test/suites/tpch_sf1_p2/ddl/nation.sql index 3eccc0dc976e92..b88f8c7365e4e2 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/nation.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/nation.sql @@ -7,6 +7,7 @@ CREATE TABLE IF NOT EXISTS nation ( DUPLICATE KEY(N_NATIONKEY, N_NAME) DISTRIBUTED BY HASH(N_NATIONKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", 
"replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/orders.sql b/regression-test/suites/tpch_sf1_p2/ddl/orders.sql index caeaa3415082d7..2574b8294678b2 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/orders.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/orders.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS orders ( DUPLICATE KEY(O_ORDERKEY, O_CUSTKEY) DISTRIBUTED BY HASH(O_ORDERKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/part.sql b/regression-test/suites/tpch_sf1_p2/ddl/part.sql index 994b6e66d55c50..e33cb50538cf36 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/part.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/part.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS part ( DUPLICATE KEY(P_PARTKEY, P_NAME) DISTRIBUTED BY HASH(P_PARTKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/partsupp.sql b/regression-test/suites/tpch_sf1_p2/ddl/partsupp.sql index be186a29db9188..8ccc23a27946b1 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/partsupp.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/partsupp.sql @@ -8,6 +8,7 @@ CREATE TABLE IF NOT EXISTS partsupp ( DUPLICATE KEY(PS_PARTKEY, PS_SUPPKEY) DISTRIBUTED BY HASH(PS_PARTKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/region.sql b/regression-test/suites/tpch_sf1_p2/ddl/region.sql index fbe34c05c6f422..fde4c2add332cb 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/region.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/region.sql @@ -6,6 +6,7 @@ CREATE TABLE IF NOT EXISTS region ( DUPLICATE KEY(R_REGIONKEY, R_NAME) DISTRIBUTED BY HASH(R_REGIONKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_p2/ddl/supplier.sql b/regression-test/suites/tpch_sf1_p2/ddl/supplier.sql index 7214eaebd12f13..244db711c43f38 100644 --- a/regression-test/suites/tpch_sf1_p2/ddl/supplier.sql +++ b/regression-test/suites/tpch_sf1_p2/ddl/supplier.sql @@ -10,5 +10,6 @@ CREATE TABLE IF NOT EXISTS supplier ( DUPLICATE KEY(S_SUPPKEY, S_NAME) DISTRIBUTED BY HASH(S_SUPPKEY) BUCKETS 3 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/customer.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/customer.sql index f81902a585efc8..1502dfdffec85b 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/customer.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/customer.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS customer ( UNIQUE KEY(C_CUSTKEY) DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/lineitem.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/lineitem.sql index ee2b84d2479b24..d1ef02f4626b03 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/lineitem.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/lineitem.sql @@ -19,6 +19,7 @@ CREATE TABLE IF NOT EXISTS lineitem ( UNIQUE KEY(L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER) DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/nation.sql 
b/regression-test/suites/tpch_sf1_unique_p2/ddl/nation.sql index d83e91c001e727..042449c9cffe6f 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/nation.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/nation.sql @@ -7,6 +7,7 @@ CREATE TABLE IF NOT EXISTS nation ( UNIQUE KEY(N_NATIONKEY) DISTRIBUTED BY HASH(N_NATIONKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/orders.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/orders.sql index 58793417aa6390..431469e18e209e 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/orders.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/orders.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS orders ( UNIQUE KEY(O_ORDERKEY) DISTRIBUTED BY HASH(O_ORDERKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/part.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/part.sql index 30881dfc914aa3..a43cefe01c934d 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/part.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/part.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS part ( UNIQUE KEY(P_PARTKEY) DISTRIBUTED BY HASH(P_PARTKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/partsupp.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/partsupp.sql index 6c35db2152fe59..8d29ff0a3eec8a 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/partsupp.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/partsupp.sql @@ -8,6 +8,7 @@ CREATE TABLE IF NOT EXISTS partsupp ( UNIQUE KEY(PS_PARTKEY, PS_SUPPKEY) DISTRIBUTED BY HASH(PS_PARTKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/region.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/region.sql index f83fd250ecb133..4031b3fc260708 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/region.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/region.sql @@ -6,6 +6,7 @@ CREATE TABLE IF NOT EXISTS region ( UNIQUE KEY(R_REGIONKEY) DISTRIBUTED BY HASH(R_REGIONKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/tpch_sf1_unique_p2/ddl/supplier.sql b/regression-test/suites/tpch_sf1_unique_p2/ddl/supplier.sql index 8d09b6848b9e30..71bab5a07a4c7e 100644 --- a/regression-test/suites/tpch_sf1_unique_p2/ddl/supplier.sql +++ b/regression-test/suites/tpch_sf1_unique_p2/ddl/supplier.sql @@ -10,5 +10,6 @@ CREATE TABLE IF NOT EXISTS supplier ( UNIQUE KEY(S_SUPPKEY) DISTRIBUTED BY HASH(S_SUPPKEY) BUCKETS 1 PROPERTIES ( + "enable_mow_light_delete" = "true", "replication_num" = "1" ) diff --git a/regression-test/suites/trash_p0/clean_trash.groovy b/regression-test/suites/trash_p0/clean_trash.groovy index fdfafc4887fdcd..525e8054ce5ca3 100644 --- a/regression-test/suites/trash_p0/clean_trash.groovy +++ b/regression-test/suites/trash_p0/clean_trash.groovy @@ -32,13 +32,15 @@ suite("test_clean_trash", "docker") { options.beConfigs += [ 'max_garbage_sweep_interval=2', 'min_garbage_sweep_interval=1', - 'report_disk_state_interval_seconds=1' + 'report_disk_state_interval_seconds=1', + 'trash_file_expire_time_sec=600' ] options.beNum = 3 docker(options) { + def checkFunc = { boolean trashZero -> def succ = false - 
for (int i=0; i < 300; ++i) { + dockerAwaitUntil(300) { def bes = sql_return_maparray """show backends""" succ = bes.every { if (trashZero) { @@ -47,10 +49,7 @@ suite("test_clean_trash", "docker") { return !"0.000".equals((it.TrashUsedCapacity).trim()) } } - if (succ) { - break; - } - sleep(1000) + succ } Assert.assertTrue(succ) } diff --git a/regression-test/suites/unique_with_mow_c_p0/test_create_table.groovy b/regression-test/suites/unique_with_mow_c_p0/test_create_table.groovy index 8cd7cb6d19874b..a7810c2a6c3b12 100644 --- a/regression-test/suites/unique_with_mow_c_p0/test_create_table.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/test_create_table.groovy @@ -81,7 +81,7 @@ suite("test_create_table") { "enable_unique_key_merge_on_write" = "true" ); """ - exception "Key cluster column[c_addresses] doesn't exist" + exception "Cluster key column[c_addresses] doesn't exist" } // mow unique table with duplicate cluster keys @@ -184,27 +184,6 @@ suite("test_create_table") { ); """ - // test legacy planner - sql """set enable_nereids_planner=false;""" - // duplicate table with cluster keys - test { - sql """ - CREATE TABLE `$tableName` ( - `c_custkey` int(11) NOT NULL COMMENT "", - `c_name` varchar(26) NOT NULL COMMENT "", - `c_address` varchar(41) NOT NULL COMMENT "", - `c_city` varchar(11) NOT NULL COMMENT "" - ) - DUPLICATE KEY (`c_custkey`) - CLUSTER BY (`c_name`, `c_address`) - DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1" - ); - """ - exception "Syntax error" - } - // test nereids planner sql """set enable_nereids_planner=true;""" sql """set enable_nereids_dml=true;""" diff --git a/regression-test/suites/unique_with_mow_c_p0/test_schema_change.groovy b/regression-test/suites/unique_with_mow_c_p0/test_schema_change.groovy index 9abee82f7c00de..37c96e79a6b428 100644 --- a/regression-test/suites/unique_with_mow_c_p0/test_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/test_schema_change.groovy @@ -48,7 +48,7 @@ suite("test_schema_change") { `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) CLUSTER BY(`cost`, `comment`) - DISTRIBUTED BY HASH(`user_id`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 PROPERTIES ( "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" ); @@ -237,12 +237,12 @@ suite("test_schema_change") { } // 5. modify column order should success (Temporarily throw exception) - test { + /*test { sql """ alter table ${tableName} ORDER BY (`user_id`, `date`, `city`, `age`, `sex`, `max_dwell_time`, `comment`, `min_dwell_time`, `last_visit_date_not_null`, `cost`, `score`, `last_update_date`); """ exception "Can not modify column order in Unique data model table" - } + }*/ /*assertTrue(getAlterTableState(), "alter column order should success"); { sql """ INSERT INTO ${tableName} diff --git a/regression-test/suites/unique_with_mow_c_p0/test_schema_change_ck.groovy b/regression-test/suites/unique_with_mow_c_p0/test_schema_change_ck.groovy new file mode 100644 index 00000000000000..2a6729e84afc69 --- /dev/null +++ b/regression-test/suites/unique_with_mow_c_p0/test_schema_change_ck.groovy @@ -0,0 +1,264 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_schema_change_ck") { + def db = "regression_test_unique_with_mow_c_p0" + def tableName = "test_schema_change_ck" + + def getAlterTableState = { + waitForSchemaChangeDone { + sql """ SHOW ALTER TABLE COLUMN WHERE tablename='${tableName}' ORDER BY createtime DESC LIMIT 1 """ + time 600 + } + return true + } + + sql """ DROP TABLE IF EXISTS ${tableName} """ + if (!isCloudMode()) { + test { + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} ( + `c1` int(11) NULL, + `c2` int(11) NULL, + `c3` int(11) NULL + ) unique KEY(`c1`) + cluster by(`c3`, `c2`) + DISTRIBUTED BY HASH(`c1`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "disable_auto_compaction" = "true", + "light_schema_change" = "false" + ); + """ + exception "Unique merge-on-write table with cluster keys must enable light schema change" + } + } + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} ( + `c1` int(11) NULL, + `c2` int(11) NULL, + `c3` int(11) NULL + ) unique KEY(`c1`) + cluster by(`c3`, `c2`) + PARTITION BY RANGE(`c1`) + ( + PARTITION `p_10000` VALUES [("0"), ("10000")) + ) + DISTRIBUTED BY HASH(`c1`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "disable_auto_compaction" = "true" + ); + """ + + sql """ INSERT INTO ${tableName} VALUES (11, 28, 38), (10, 29, 39) """ + qt_select_original """select * from ${tableName}""" + + /****** add value column ******/ + // after cluster key + sql """ alter table ${tableName} ADD column c4 int(11) after c3; """ + assertTrue(getAlterTableState(), "add column should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, c4) VALUES (13, 27, 36, 40), (12, 26, 37, 40) """ + qt_select_add_c4 """select * from ${tableName}""" + + // before cluster key + sql """ alter table ${tableName} ADD column c5 int(11) after c1; """ + assertTrue(getAlterTableState(), "add column should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, c4, c5) VALUES (15, 20, 34, 40, 50), (14, 20, 35, 40, 50) """ + qt_select_add_c5 """select * from ${tableName}""" + + // in the middle of cluster key + sql """ alter table ${tableName} ADD column c6 int(11) after c2; """ + assertTrue(getAlterTableState(), "add column should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, c4, c5, c6) VALUES (17, 20, 32, 40, 50, 60), (16, 20, 33, 40, 50, 60) """ + qt_select_add_c6 """select * from ${tableName}""" + + /****** add key column ******/ + sql """ alter table ${tableName} ADD column k2 int(11) key after c1; """ + assertTrue(getAlterTableState(), "add column should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (19, 20, 30, 200), (18, 20, 31, 200) """ + qt_select_add_k2 """select * from ${tableName}""" + + /****** TODO add cluster key column is not supported ******/ + + /****** drop value column ******/ + sql """ alter table ${tableName} drop column c4; """ + assertTrue(getAlterTableState(), "drop column should success") + sql """ INSERT 
INTO ${tableName}(c1, c2, c3, k2) VALUES (119, 20, 30, 200), (118, 20, 31, 200) """ + qt_select_drop_c4 """select * from ${tableName}""" + + sql """ alter table ${tableName} drop column c5; """ + assertTrue(getAlterTableState(), "drop column should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (117, 20, 32, 200), (116, 20, 33, 200) """ + qt_select_drop_c5 """select * from ${tableName}""" + + sql """ alter table ${tableName} drop column c6; """ + assertTrue(getAlterTableState(), "drop column should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (115, 25, 34, 200), (114, 24, 35, 200) """ + qt_select_drop_c6 """select * from ${tableName}""" + + /****** drop key column ******/ + test { + sql """ alter table ${tableName} drop column k2; """ + exception "Can not drop key column in Unique data model table" + } + + /****** TODO does not support drop cluster key ******/ + test { + sql """ alter table ${tableName} drop column c3; """ + exception "Can not drop cluster key column in Unique data model table" + } + + /****** reorder ******/ + sql """ alter table ${tableName} order by(c1, k2, c3, c2); """ + assertTrue(getAlterTableState(), "reorder should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (113, 23, 36, 200), (112, 22, 37, 200) """ + qt_select_reorder """select * from ${tableName}""" + + /****** modify key column data type ******/ + sql """ alter table ${tableName} modify column k2 BIGINT key; """ + assertTrue(getAlterTableState(), "modify should success") + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (111, 21, 38, 200), (110, 20, 39, 200) """ + qt_select_modify_k2 """select * from ${tableName}""" + + /****** TODO does not support modify cluster key column data type ******/ + test { + sql """ alter table ${tableName} modify column c2 BIGINT; """ + exception "Can not modify cluster key column" + } + + /****** create mv ******/ + def mv_name = "k2_c3" + sql """DROP MATERIALIZED VIEW IF EXISTS ${mv_name}""" + createMV """ create materialized view ${mv_name} as select c1, c3 from ${tableName}; """ + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (211, 21, 38, 200), (210, 20, 39, 200) """ + qt_select_create_mv_base """select * from ${tableName}""" + qt_select_create_mv_mv """select c1, c3 from ${tableName}""" + + /****** create rollup ******/ + sql """ alter table ${tableName} ADD ROLLUP r1(k2, c1, c2); """ + waitForSchemaChangeDone { + sql """show alter table rollup where tablename='${tableName}' order by createtime desc limit 1""" + time 600 + } + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (311, 21, 38, 200), (310, 20, 39, 200) """ + qt_select_create_rollup_base """select * from ${tableName}""" + qt_select_create_rollup_roll """select k2, c1, c2 from ${tableName}""" + + /****** add partition ******/ + sql "ALTER TABLE ${tableName} ADD PARTITION p_20000 VALUES [('10000'), ('20000'));" + for (int i = 0; i < 10; i++) { + List<List<Object>> partitions = sql "show partitions from ${tableName};" + logger.info("partitions: ${partitions}") + if (partitions.size() < 2 && i < 10) { + sleep(50) + continue + } + assertEquals(partitions.size(), 2) + } + sql """ INSERT INTO ${tableName}(c1, c2, c3, k2) VALUES (10011, 21, 38, 200), (10010, 20, 39, 200) """ + qt_select_add_partition """select * from ${tableName} partition (p_20000)""" + + /****** one sql contain multi column changes ******/ + + /****** truncate table ******/ + sql """ TRUNCATE TABLE ${tableName} """ + sql """ INSERT INTO ${tableName}(c1, c2, c3) VALUES (11, 28, 
38), (10, 29, 39), (12, 26, 37), (13, 27, 36) """ + qt_select_truncate """select * from ${tableName}""" + + /****** create table with rollup ******/ + tableName = tableName + "_rollup" + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} ( + `k1` int(11) NULL, + `k2` int(11) NULL, + `c3` int(11) NULL, + `c4` int(11) NULL, + `c5` int(11) NULL + ) unique KEY(`k1`, `k2`) + cluster by(`c4`, `c5`) + DISTRIBUTED BY HASH(`k1`) BUCKETS 1 + ROLLUP ( + r1 (k2, k1, c4, c3) + ) + PROPERTIES ( + "replication_num" = "1", + "disable_auto_compaction" = "true" + ); + """ + sql """ INSERT INTO ${tableName} VALUES (11, 21, 32, 42, 52), (12, 22, 31, 41, 51); """ + qt_select_rollup_base """select * from ${tableName};""" + qt_select_rollup_roll """select k2, k1, c4, c3 from ${tableName};""" + + /****** specify index, not base index ******/ + sql """ ALTER TABLE ${tableName} ORDER BY(k2, k1, c3, c4) from r1; """ + assertTrue(getAlterTableState(), "reorder rollup should success") + qt_select_rollup_base_sc """select * from ${tableName};""" + qt_select_rollup_roll_sc """select k2, k1, c4, c3 from ${tableName};""" + sql """ INSERT INTO ${tableName} VALUES (13, 23, 34, 44, 54), (14, 24, 33, 43, 53); """ + qt_select_rollup_base_sc1 """select * from ${tableName};""" + qt_select_rollup_roll_sc1 """select k2, k1, c4, c3 from ${tableName};""" + + /****** backup restore ******/ + if (!isCloudMode()) { + def repoName = "repo_" + UUID.randomUUID().toString().replace("-", "") + def backup = tableName + "_bak" + def syncer = getSyncer() + syncer.createS3Repository(repoName) + def result = sql """ show tablets from ${tableName}; """ + logger.info("tablets 0: ${result}") + + // backup + sql """ BACKUP SNAPSHOT ${context.dbName}.${backup} TO ${repoName} ON (${tableName}) properties("type"="full"); """ + syncer.waitSnapshotFinish() + def snapshot = syncer.getSnapshotTimestamp(repoName, backup) + assertTrue(snapshot != null) + sql """ INSERT INTO ${tableName} VALUES (15, 25, 34, 44, 54), (16, 26, 33, 43, 53); """ + qt_select_restore_base2 """select * from ${tableName};""" + qt_select_restore_roll2 """select k2, k1, c4, c3 from ${tableName};""" + + // restore + logger.info(""" RESTORE SNAPSHOT ${context.dbName}.${backup} FROM `${repoName}` ON (`${tableName}`) PROPERTIES ("backup_timestamp" = "${snapshot}","replication_num" = "1" ) """) + sql """ RESTORE SNAPSHOT ${context.dbName}.${backup} FROM `${repoName}` ON (`${tableName}`) PROPERTIES ("backup_timestamp" = "${snapshot}","replication_num" = "1" ) """ + syncer.waitAllRestoreFinish(context.dbName) + result = sql """ show tablets from ${tableName}; """ + logger.info("tablets 1: ${result}") + qt_select_restore_base """select * from ${tableName};""" + qt_select_restore_roll """select k2, k1, c4, c3 from ${tableName};""" + sql """ INSERT INTO ${tableName} VALUES (17, 27, 34, 44, 54), (18, 28, 33, 43, 53); """ + qt_select_restore_base1 """select * from ${tableName};""" + qt_select_restore_roll1 """select k2, k1, c4, c3 from ${tableName};""" + + // restore + sql """ drop table ${tableName}; """ + sql """ RESTORE SNAPSHOT ${context.dbName}.${backup} FROM `${repoName}` ON (`${tableName}`) PROPERTIES ("backup_timestamp" = "${snapshot}","replication_num" = "1" ) """ + syncer.waitAllRestoreFinish(context.dbName) + result = sql """ show tablets from ${tableName}; """ + logger.info("tablets 2: ${result}") + qt_select_restore_base2 """select * from ${tableName};""" + qt_select_restore_roll2 """select k2, k1, c4, c3 from ${tableName};""" + sql """ 
INSERT INTO ${tableName} VALUES (17, 27, 34, 44, 54), (18, 28, 33, 43, 53); """ + qt_select_restore_base3 """select * from ${tableName};""" + qt_select_restore_roll4 """select k2, k1, c4, c3 from ${tableName};""" + + sql "DROP REPOSITORY `${repoName}`" + } + +} diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_create.sql index 30df14525cf71c..2214554814536e 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_create.sql @@ -12,6 +12,7 @@ UNIQUE KEY (`c_custkey`) CLUSTER BY (`c_region`, `c_phone`, `c_city`) DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "disable_auto_compaction" = "true", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql index 892384684bf540..9508841da0fcca 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql @@ -12,6 +12,7 @@ UNIQUE KEY (`c_custkey`) CLUSTER BY (`c_mktsegment`, `c_city`, `c_region`, `c_nation`) DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_create.sql index ffd796f227a287..5e2490b3e72d3a 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_create.sql @@ -21,6 +21,7 @@ UNIQUE KEY (`d_datekey`) CLUSTER BY (`d_sellingseason`, `d_holidayfl`) DISTRIBUTED BY HASH(`d_datekey`) BUCKETS 1 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "disable_auto_compaction" = "true", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql index 9ec46190c794f2..447c6276a241f2 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql @@ -21,6 +21,7 @@ UNIQUE KEY (`d_datekey`) CLUSTER BY (`d_sellingseason`, `d_lastdayinweekfl`) DISTRIBUTED BY HASH(`d_datekey`) BUCKETS 1 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql index 0945fe0af46982..094015d3ec4050 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql @@ -29,6 +29,7 @@ 
PARTITION p1997 VALUES [("19970101"), ("19980101")), PARTITION p1998 VALUES [("19980101"), ("19990101"))) DISTRIBUTED BY HASH(`lo_orderkey`) BUCKETS 48 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "disable_auto_compaction" = "true", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql index a9b1d4115612f0..39502312434b8b 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql @@ -29,6 +29,7 @@ PARTITION p1997 VALUES [("19970101"), ("19980101")), PARTITION p1998 VALUES [("19980101"), ("19990101"))) DISTRIBUTED BY HASH(`lo_orderkey`) BUCKETS 48 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_create.sql index 86e906b4c2b2e6..b98d195979a499 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_create.sql @@ -13,6 +13,7 @@ UNIQUE KEY (`p_partkey`) CLUSTER BY (`p_color`, `p_name`, `p_category`) DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "disable_auto_compaction" = "true", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql index 5ba4038e12d709..f6f7a84dc21731 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql @@ -13,6 +13,7 @@ UNIQUE KEY (`p_partkey`) CLUSTER BY (`p_size`, `p_type`, `p_partkey`) DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_create.sql index 404e4987b444b8..85e10df302fdd9 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_create.sql @@ -11,6 +11,7 @@ UNIQUE KEY (`s_suppkey`) CLUSTER BY (`s_address`) DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "disable_auto_compaction" = "true", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql index 74fa9c46baa222..356a6079f04029 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql +++ 
b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql @@ -11,6 +11,7 @@ UNIQUE KEY (`s_suppkey`) CLUSTER BY (`s_nation`, `s_region`, `s_city`, `s_name`) DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_type" = 'int', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/customer_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/customer_create.sql index 9e201b44646ecf..5ade9e170ad35a 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/customer_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/customer_create.sql @@ -12,6 +12,7 @@ UNIQUE KEY (`c_custkey`) CLUSTER BY (`c_region`, `c_address`, `c_city`, `c_name`) DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/date_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/date_create.sql index 3d12170cf99a82..0ae14f534ca900 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/date_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/date_create.sql @@ -21,6 +21,7 @@ UNIQUE KEY (`d_datekey`) CLUSTER BY (`d_weeknuminyear`, `d_month`) DISTRIBUTED BY HASH(`d_datekey`) BUCKETS 1 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql index b9481142be1121..4d213e2ff14186 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql @@ -29,6 +29,7 @@ PARTITION p1997 VALUES [("19970101"), ("19980101")), PARTITION p1998 VALUES [("19980101"), ("19990101"))) DISTRIBUTED BY HASH(`lo_orderkey`) BUCKETS 48 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/part_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/part_create.sql index 3975ff83f09fe5..980c57e0a90fc2 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/part_create.sql +++ b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/part_create.sql @@ -13,6 +13,7 @@ UNIQUE KEY (`p_partkey`) CLUSTER BY (`p_color`, `p_container`) DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql index 7e101c5667f516..78d4e332efbe43 100644 --- a/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql +++ 
b/regression-test/suites/unique_with_mow_c_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql @@ -11,6 +11,7 @@ UNIQUE KEY (`s_suppkey`) CLUSTER BY (`s_address`, `s_name`) DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_new_partial_update_delete.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_new_partial_update_delete.groovy index 2157388c0f7ddc..32cb8eedc1279c 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_new_partial_update_delete.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_new_partial_update_delete.groovy @@ -140,120 +140,6 @@ suite('test_new_partial_update_delete') { sql "set enable_insert_strict=true;" sql "drop table if exists ${tableName1};" - - - - // old planner - try { - def tableMorName3 = "test_new_partial_update_mor_delete3" - sql "DROP TABLE IF EXISTS ${tableMorName3};" - sql """ CREATE TABLE IF NOT EXISTS ${tableMorName3} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "disable_auto_compaction" = "true", - "enable_unique_key_merge_on_write" = "false", - "enable_mow_light_delete" = "true", - "replication_num" = "1", - "store_row_column" = "${use_row_store}"); """ - } catch (Exception e) { - log.info(e.getMessage()) - assertTrue(e.getMessage().contains('enable_mow_light_delete property is only supported for unique merge-on-write table')) - } - - try { - def tableMorName4 = "test_new_partial_update_mor_delete4" - sql "DROP TABLE IF EXISTS ${tableMorName4};" - sql """ CREATE TABLE IF NOT EXISTS ${tableMorName4} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "disable_auto_compaction" = "true", - "enable_unique_key_merge_on_write" = "false", - "replication_num" = "1", - "store_row_column" = "${use_row_store}"); """ - sql """alter table ${tableMorName4} set ("enable_mow_light_delete"="true")""" - } catch (Exception e) { - log.info(e.getMessage()) - assertTrue(e.getMessage().contains('enable_mow_light_delete property is only supported for unique merge-on-write table')) - } - sql "set enable_nereids_planner=false" - def tableName2 = "test_new_partial_update_delete2" - sql "DROP TABLE IF EXISTS ${tableName2};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( - `k1` int NOT NULL, - `c1` int, - `c2` int, - `c3` int, - `c4` int - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "disable_auto_compaction" = "true", - "enable_mow_light_delete" = "false", - "replication_num" = "1", - "store_row_column" = "${use_row_store}"); """ - - def output3 = sql "show create table ${tableName2}" - assertTrue output3[0][1].contains("\"enable_mow_light_delete\" = \"false\""); - sql "insert into ${tableName2} values(1,1,1,1,1)" - // 1,1,1,1,1 - qt_sql21 "select * from ${tableName2} order by k1;" - sql "delete from ${tableName2} where k1 = 1" - // empty - qt_sql22 "select * from ${tableName2} order by k1;" - sql "set show_hidden_columns = true;" - // 1,null,null,null,1 - qt_sql23 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName2} order by k1;" - sql "set show_hidden_columns = false;" - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict=false;" - sql "insert into 
${tableName2} (k1,c1) values(1,2)" - // 1,2,NULL,NULL,NULL - qt_sql24 "select * from ${tableName2} order by k1;" - - - - sql """alter table ${tableName2} set ("enable_mow_light_delete"="true") """ - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict=true;" - def output4 = sql "show create table ${tableName2}" - assertTrue output4[0][1].contains("\"enable_mow_light_delete\" = \"true\""); - sql "insert into ${tableName2} values(2,2,2,2,2)" - // 1,2,NULL,NULL,NULL - // 2,2,2,2,2 - qt_sql31 "select * from ${tableName2} order by k1;" - sql "delete from ${tableName2} where k1 <= 2" - // empty - qt_sql32 "select * from ${tableName2} order by k1;" - sql "set show_hidden_columns = true;" - // empty - qt_sql33 "select * from ${tableName2} order by k1;" - sql "set show_hidden_columns = false;" - sql "set skip_delete_predicate = true;" - // 1,2,NULL,NULL,NULL - // 2,2,2,2,2 - qt_sql34 "select * from ${tableName2} order by k1;" - sql "set skip_delete_predicate = false;" - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict=false;" - sql "insert into ${tableName2} (k1,c1) values(2,3)" - // 2,3,2,2,2 - qt_sql35 "select * from ${tableName2} order by k1;" - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict=true;" - - sql "drop table if exists ${tableName2};" } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_after_delete.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_after_delete.groovy index 97a76567688b1e..9757b9e7685846 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_after_delete.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_after_delete.groovy @@ -25,8 +25,6 @@ suite("test_partial_update_after_delete", "p0") { connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { sql "use ${db};" - sql "SET enable_nereids_planner=true;" - sql "SET enable_fallback_to_original_planner=false;" sql "set enable_unique_key_partial_update=false;" sql "set enable_insert_strict=true;" def tableName1 = "test_partial_update_after_delete1" @@ -50,32 +48,6 @@ suite("test_partial_update_after_delete", "p0") { sql "set enable_insert_strict=false;" sql "insert into ${tableName1}(k1, v1) values(1,2);" qt_select1 "select * from ${tableName1};" - - sql "set enable_unique_key_partial_update=false;" - sql "set enable_insert_strict=true;" - sql "SET enable_nereids_planner=false;" - sql "SET enable_fallback_to_original_planner=false;" - def tableName2 = "test_partial_update_after_delete2" - sql "DROP TABLE IF EXISTS ${tableName2};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( - `k1` INT NULL, - `v1` INT NULL, - `v2` INT NULL - )UNIQUE KEY(k1) - DISTRIBUTED BY HASH(k1) BUCKETS 1 - PROPERTIES ( - "enable_unique_key_merge_on_write" = "true", - "disable_auto_compaction" = "true", - "enable_mow_light_delete" = "false", - "replication_num" = "1", - "store_row_column" = "${use_row_store}"); """ - - sql "insert into ${tableName2} values(1,1,1);" - sql "delete from ${tableName2} where k1=1;" - sql "set enable_unique_key_partial_update=true;" - sql "set enable_insert_strict=false;" - sql "insert into ${tableName2}(k1, v1) values(1,2);" - qt_select2 "select * from ${tableName2};" } } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.groovy 
b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.groovy index ec46939b2f5855..1ec60fbb10e837 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_auto_inc.groovy @@ -19,100 +19,90 @@ suite("test_partial_update_auto_inc") { String db = context.config.getDbNameByFile(context.file) sql "select 1;" // to create database - for (def use_nereids_planner : [false, true]) { - logger.info("current params: use_nereids_planner: ${use_nereids_planner}") - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql "use ${db};" + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" - if (use_nereids_planner) { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc """ - sql """ CREATE TABLE test_primary_key_partial_update_auto_inc ( + sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc """ + sql """ CREATE TABLE test_primary_key_partial_update_auto_inc ( `id` BIGINT NOT NULL AUTO_INCREMENT, `name` varchar(65533) NOT NULL COMMENT "用户姓名" ) UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true"); """ - sql """ set enable_unique_key_partial_update=true; """ - sql "sync" - // insert stmt only misses auto-inc key column - sql """ insert into test_primary_key_partial_update_auto_inc(name) values("doris1"); """ - sql """ set enable_unique_key_partial_update=false; """ - sql "sync" - sql """ insert into test_primary_key_partial_update_auto_inc(name) values("doris2"); """ - // stream load only misses auto-inc key column - streamLoad { - table "test_primary_key_partial_update_auto_inc" - set 'partial_columns', 'true' - set 'column_separator', ',' - set 'columns', 'name' - file 'partial_update_autoinc1.csv' - time 10000 - } - qt_select_1 """ select name from test_primary_key_partial_update_auto_inc order by name; """ - qt_select_2 """ select count(distinct id) from test_primary_key_partial_update_auto_inc; """ + sql """ set enable_unique_key_partial_update=true; """ + sql "sync" + // insert stmt only misses auto-inc key column + sql """ insert into test_primary_key_partial_update_auto_inc(name) values("doris1"); """ + sql """ set enable_unique_key_partial_update=false; """ + sql "sync" + sql """ insert into test_primary_key_partial_update_auto_inc(name) values("doris2"); """ + // stream load only misses auto-inc key column + streamLoad { + table "test_primary_key_partial_update_auto_inc" + set 'partial_columns', 'true' + set 'column_separator', ',' + set 'columns', 'name' + file 'partial_update_autoinc1.csv' + time 10000 + } + qt_select_1 """ select name from test_primary_key_partial_update_auto_inc order by name; """ + qt_select_2 """ select count(distinct id) from test_primary_key_partial_update_auto_inc; """ - sql """ set enable_unique_key_partial_update=true; """ - sql "sync" - // insert stmt withou column list - sql """ insert into test_primary_key_partial_update_auto_inc values(100,"doris5"); """ - // insert stmt, column list include all visible columns - sql """ insert into 
test_primary_key_partial_update_auto_inc(id,name) values(102,"doris6"); """ - sql """ set enable_unique_key_partial_update=false; """ - sql "sync" - sql """ insert into test_primary_key_partial_update_auto_inc values(101, "doris7"); """ - // stream load withou column list - streamLoad { - table "test_primary_key_partial_update_auto_inc" - set 'partial_columns', 'true' - set 'column_separator', ',' - file 'partial_update_autoinc2.csv' - time 10000 - } - // stream load, column list include all visible columns - streamLoad { - table "test_primary_key_partial_update_auto_inc" - set 'partial_columns', 'true' - set 'column_separator', ',' - set 'columns', 'id,name' - file 'partial_update_autoinc3.csv' - time 10000 - } - qt_select_3 """ select name from test_primary_key_partial_update_auto_inc order by name; """ - qt_select_4 """ select count(distinct id) from test_primary_key_partial_update_auto_inc; """ - sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc """ + sql """ set enable_unique_key_partial_update=true; """ + sql "sync" + // insert stmt withou column list + sql """ insert into test_primary_key_partial_update_auto_inc values(100,"doris5"); """ + // insert stmt, column list include all visible columns + sql """ insert into test_primary_key_partial_update_auto_inc(id,name) values(102,"doris6"); """ + sql """ set enable_unique_key_partial_update=false; """ + sql "sync" + sql """ insert into test_primary_key_partial_update_auto_inc values(101, "doris7"); """ + // stream load withou column list + streamLoad { + table "test_primary_key_partial_update_auto_inc" + set 'partial_columns', 'true' + set 'column_separator', ',' + file 'partial_update_autoinc2.csv' + time 10000 + } + // stream load, column list include all visible columns + streamLoad { + table "test_primary_key_partial_update_auto_inc" + set 'partial_columns', 'true' + set 'column_separator', ',' + set 'columns', 'id,name' + file 'partial_update_autoinc3.csv' + time 10000 + } + qt_select_3 """ select name from test_primary_key_partial_update_auto_inc order by name; """ + qt_select_4 """ select count(distinct id) from test_primary_key_partial_update_auto_inc; """ + sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc """ - sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc2 """ - sql """ CREATE TABLE test_primary_key_partial_update_auto_inc2 ( + sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc2 """ + sql """ CREATE TABLE test_primary_key_partial_update_auto_inc2 ( `id` BIGINT NOT NULL, `c1` int, `c2` int, `cid` BIGINT NOT NULL AUTO_INCREMENT) UNIQUE KEY(`id`) DISTRIBUTED BY HASH(`id`) BUCKETS 1 PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true"); """ - sql "insert into test_primary_key_partial_update_auto_inc2 values(1,10,10,10),(2,20,20,20),(3,30,30,30),(4,40,40,40);" - order_qt_select_5 "select * from test_primary_key_partial_update_auto_inc2" - sql """ set enable_unique_key_partial_update=true; """ - sql "sync;" - // insert stmt only misses auto-inc value column, its value should not change when do partial update - sql "insert into test_primary_key_partial_update_auto_inc2(id,c1,c2) values(1,99,99),(2,99,99);" - // stream load only misses auto-inc value column, its value should not change when do partial update - streamLoad { - table "test_primary_key_partial_update_auto_inc2" - set 'partial_columns', 'true' - set 'column_separator', ',' - set 'columns', 'id,c1,c2' - file 'partial_update_autoinc4.csv' - time 10000 - } - 
order_qt_select_6 "select * from test_primary_key_partial_update_auto_inc2" - sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc2 """ + sql "insert into test_primary_key_partial_update_auto_inc2 values(1,10,10,10),(2,20,20,20),(3,30,30,30),(4,40,40,40);" + order_qt_select_5 "select * from test_primary_key_partial_update_auto_inc2" + sql """ set enable_unique_key_partial_update=true; """ + sql "sync;" + // insert stmt only misses auto-inc value column, its value should not change when do partial update + sql "insert into test_primary_key_partial_update_auto_inc2(id,c1,c2) values(1,99,99),(2,99,99);" + // stream load only misses auto-inc value column, its value should not change when do partial update + streamLoad { + table "test_primary_key_partial_update_auto_inc2" + set 'partial_columns', 'true' + set 'column_separator', ',' + set 'columns', 'id,c1,c2' + file 'partial_update_autoinc4.csv' + time 10000 } + order_qt_select_6 "select * from test_primary_key_partial_update_auto_inc2" + sql """ DROP TABLE IF EXISTS test_primary_key_partial_update_auto_inc2 """ } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.groovy index 4b9f1feef75ae6..4e114349ce025c 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_case_insensitivity.groovy @@ -21,20 +21,11 @@ suite("test_partial_update_case_insensitivity", "p0") { sql "select 1;" // to create database for (def use_row_store : [false, true]) { - for (def use_nereids_planner : [false, true]) { - logger.info("current params: use_row_store: ${use_row_store}, use_nereids_planner: ${use_nereids_planner}") - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql "use ${db};" - if (use_nereids_planner) { - sql """ set enable_nereids_planner=true; """ - sql """ set enable_fallback_to_original_planner=false; """ - } else { - sql """ set enable_nereids_planner = false; """ - } - - def tableName = "test_partial_update_case_insensitivity" - sql """ DROP TABLE IF EXISTS ${tableName} """ - sql """ CREATE TABLE ${tableName} ( + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + def tableName = "test_partial_update_case_insensitivity" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( name varchar(300), status int, MY_COLUMN int, @@ -46,24 +37,23 @@ suite("test_partial_update_case_insensitivity", "p0") { "replication_allocation" = "tag.location.default: 1", "enable_unique_key_merge_on_write" = "true", "store_row_column" = "${use_row_store}");""" - - sql "set enable_unique_key_partial_update = true;" - sql "set enable_insert_strict = false;" - sql "sync;" - sql """ insert into ${tableName}(name, STATUS) values("t1", 1); """ - qt_sql "select * from ${tableName} order by name;" - sql """ insert into ${tableName}(name, my_column) values("t1", 2); """ - qt_sql "select * from ${tableName} order by name;" - sql """ insert into ${tableName}(name, My_Column, uPaNddOWN) values("t2", 20, 30); """ - qt_sql "select * from ${tableName} order by name;" - sql """ insert into ${tableName}(NAME, StAtUs, upanddown) values("t1", 999, 888); """ - qt_sql "select * from ${tableName} order 
by name;" - sql """ insert into ${tableName}(NaMe, StAtUs, mY_CoLUmN, upAndDoWn) values("t3", 123, 456, 789); """ - qt_sql "select * from ${tableName} order by name;" + sql "set enable_unique_key_partial_update = true;" + sql "set enable_insert_strict = false;" + sql "sync;" + + sql """ insert into ${tableName}(name, STATUS) values("t1", 1); """ + qt_sql "select * from ${tableName} order by name;" + sql """ insert into ${tableName}(name, my_column) values("t1", 2); """ + qt_sql "select * from ${tableName} order by name;" + sql """ insert into ${tableName}(name, My_Column, uPaNddOWN) values("t2", 20, 30); """ + qt_sql "select * from ${tableName} order by name;" + sql """ insert into ${tableName}(NAME, StAtUs, upanddown) values("t1", 999, 888); """ + qt_sql "select * from ${tableName} order by name;" + sql """ insert into ${tableName}(NaMe, StAtUs, mY_CoLUmN, upAndDoWn) values("t3", 123, 456, 789); """ + qt_sql "select * from ${tableName} order by name;" - sql """ DROP TABLE IF EXISTS ${tableName} """ - } + sql """ DROP TABLE IF EXISTS ${tableName} """ } } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy index 01b888a7e550bf..7b25982f896849 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete.groovy @@ -20,22 +20,15 @@ suite('test_partial_update_delete') { String db = context.config.getDbNameByFile(context.file) sql "select 1;" // to create database - for (def use_nereids : [true, false]) { - for (def use_row_store : [false, true]) { - logger.info("current params: use_nereids: ${use_nereids}, use_row_store: ${use_row_store}") - connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { - sql "use ${db};" - if (use_nereids) { - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - } else { - sql "set enable_nereids_planner=false" - } - sql "sync;" - - def tableName1 = "test_partial_update_delete1" - sql "DROP TABLE IF EXISTS ${tableName1};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + sql "sync;" + + def tableName1 = "test_partial_update_delete1" + sql "DROP TABLE IF EXISTS ${tableName1};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName1} ( `k1` int NOT NULL, `c1` int, `c2` int, @@ -51,9 +44,9 @@ suite('test_partial_update_delete') { "store_row_column" = "${use_row_store}"); """ - def tableName2 = "test_partial_update_delete2" - sql "DROP TABLE IF EXISTS ${tableName2};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( + def tableName2 = "test_partial_update_delete2" + sql "DROP TABLE IF EXISTS ${tableName2};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName2} ( `k` BIGINT NULL ) UNIQUE KEY(k) DISTRIBUTED BY HASH(k) BUCKETS 1 @@ -64,29 +57,29 @@ suite('test_partial_update_delete') { "replication_num" = "1", "store_row_column" = "${use_row_store}"); """ - sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" - qt_sql1 "select * from ${tableName1} order by k1;" - sql "insert into ${tableName2} values(1),(3);" - sql "delete 
from ${tableName1} A using ${tableName2} B where A.k1=B.k;" - qt_sql1 "select * from ${tableName1} order by k1;" - - sql "delete from ${tableName1} where c2=2;" - qt_sql1 "select * from ${tableName1} order by k1;" - - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - qt_with_delete_sign1 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName1};" - sql "drop table if exists ${tableName2};" - - - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - def tableName3 = "test_partial_update_delete3" - sql "DROP TABLE IF EXISTS ${tableName3};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName3} ( + sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" + qt_sql1 "select * from ${tableName1} order by k1;" + sql "insert into ${tableName2} values(1),(3);" + sql "delete from ${tableName1} A using ${tableName2} B where A.k1=B.k;" + qt_sql1 "select * from ${tableName1} order by k1;" + + sql "delete from ${tableName1} where c2=2;" + qt_sql1 "select * from ${tableName1} order by k1;" + + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + qt_with_delete_sign1 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName1} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName1};" + sql "drop table if exists ${tableName2};" + + + sql "set skip_delete_sign=false;" + sql "set skip_storage_engine_merge=false;" + sql "set skip_delete_bitmap=false;" + def tableName3 = "test_partial_update_delete3" + sql "DROP TABLE IF EXISTS ${tableName3};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName3} ( `k1` int NOT NULL, `c1` int, `c2` int, @@ -101,43 +94,43 @@ suite('test_partial_update_delete') { "replication_num" = "1", "store_row_column" = "${use_row_store}"); """ - sql "insert into ${tableName3} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5),(6,6,6,6,6);" - qt_sql2 "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" - streamLoad { - table "${tableName3}" - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'k1' - set 'partial_columns', 'true' - set 'merge_type', 'DELETE' - file 'partial_update_delete.csv' - time 10000 - } - sql "sync" - qt_sql2 "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" - - sql "set enable_insert_strict=false;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - sql "insert into ${tableName3}(k1, __DORIS_DELETE_SIGN__) values(8,1),(4,1),(9,1);" - qt_sql2 "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" - sql "set enable_insert_strict=true;" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - qt_sql2 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName3} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName3};" - - - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - def tableName4 = "test_partial_update_delete4" - sql "DROP TABLE IF EXISTS ${tableName4};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName4} ( + sql "insert into ${tableName3} 
values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5),(6,6,6,6,6);" + qt_sql2 "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" + streamLoad { + table "${tableName3}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k1' + set 'partial_columns', 'true' + set 'merge_type', 'DELETE' + file 'partial_update_delete.csv' + time 10000 + } + sql "sync" + qt_sql2 "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" + + sql "set enable_insert_strict=false;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + sql "insert into ${tableName3}(k1, __DORIS_DELETE_SIGN__) values(8,1),(4,1),(9,1);" + qt_sql2 "select k1,c1,c2,c3,c4 from ${tableName3} order by k1,c1,c2,c3,c4;" + sql "set enable_insert_strict=true;" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + qt_sql2 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__ from ${tableName3} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName3};" + + + sql "set skip_delete_sign=false;" + sql "set skip_storage_engine_merge=false;" + sql "set skip_delete_bitmap=false;" + def tableName4 = "test_partial_update_delete4" + sql "DROP TABLE IF EXISTS ${tableName4};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName4} ( `k1` int NOT NULL, `c1` int, `c2` int, @@ -153,44 +146,44 @@ suite('test_partial_update_delete') { "store_row_column" = "${use_row_store}", "function_column.sequence_col" = "c3"); """ - sql "insert into ${tableName4} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5),(6,6,6,6,6);" - qt_sql3 "select k1,c1,c2,c3,c4 from ${tableName4} order by k1,c1,c2,c3,c4;" - // if the table has sequence map col, can not set sequence map col when merge_type=delete - streamLoad { - table "${tableName4}" - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'k1' - set 'partial_columns', 'true' - set 'merge_type', 'DELETE' - file 'partial_update_delete.csv' - time 10000 - } - sql "sync" - qt_sql3 "select k1,c1,c2,c3,c4 from ${tableName4} order by k1,c1,c2,c3,c4;" - - sql "set enable_insert_strict=false;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - sql "insert into ${tableName4}(k1, __DORIS_DELETE_SIGN__) values(8,1),(4,1),(9,1);" - qt_sql3 "select k1,c1,c2,c3,c4 from ${tableName4} order by k1,c1,c2,c3,c4;" - sql "set enable_insert_strict=true;" - sql "set enable_unique_key_partial_update=false;" - sql "sync;" - - sql "set skip_delete_sign=true;" - sql "set skip_storage_engine_merge=true;" - sql "set skip_delete_bitmap=true;" - qt_sql3 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__ from ${tableName4} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" - sql "drop table if exists ${tableName4};" - - - sql "set skip_delete_sign=false;" - sql "set skip_storage_engine_merge=false;" - sql "set skip_delete_bitmap=false;" - def tableName5 = "test_partial_update_delete5" - sql "DROP TABLE IF EXISTS ${tableName5};" - sql """ CREATE TABLE IF NOT EXISTS ${tableName5} ( + sql "insert into ${tableName4} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5),(6,6,6,6,6);" + qt_sql3 "select k1,c1,c2,c3,c4 from ${tableName4} order by k1,c1,c2,c3,c4;" + // if the table has sequence map col, can not set sequence map col when merge_type=delete + streamLoad { + table "${tableName4}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k1' + set 
'partial_columns', 'true' + set 'merge_type', 'DELETE' + file 'partial_update_delete.csv' + time 10000 + } + sql "sync" + qt_sql3 "select k1,c1,c2,c3,c4 from ${tableName4} order by k1,c1,c2,c3,c4;" + + sql "set enable_insert_strict=false;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + sql "insert into ${tableName4}(k1, __DORIS_DELETE_SIGN__) values(8,1),(4,1),(9,1);" + qt_sql3 "select k1,c1,c2,c3,c4 from ${tableName4} order by k1,c1,c2,c3,c4;" + sql "set enable_insert_strict=true;" + sql "set enable_unique_key_partial_update=false;" + sql "sync;" + + sql "set skip_delete_sign=true;" + sql "set skip_storage_engine_merge=true;" + sql "set skip_delete_bitmap=true;" + qt_sql3 "select k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__ from ${tableName4} order by k1,c1,c2,c3,c4,__DORIS_DELETE_SIGN__;" + sql "drop table if exists ${tableName4};" + + + sql "set skip_delete_sign=false;" + sql "set skip_storage_engine_merge=false;" + sql "set skip_delete_bitmap=false;" + def tableName5 = "test_partial_update_delete5" + sql "DROP TABLE IF EXISTS ${tableName5};" + sql """ CREATE TABLE IF NOT EXISTS ${tableName5} ( `k1` int NOT NULL, `c1` int, `c2` int, @@ -205,30 +198,29 @@ suite('test_partial_update_delete') { "replication_num" = "1", "store_row_column" = "${use_row_store}", "function_column.sequence_type" = "int"); """ - sql "insert into ${tableName5}(k1,c1,c2,c3,c4,__DORIS_SEQUENCE_COL__) values(1,1,1,1,1,1),(2,2,2,2,2,2),(3,3,3,3,3,3),(4,4,4,4,4,4),(5,5,5,5,5,5),(6,6,6,6,6,6);" - qt_sql4 "select k1,c1,c2,c3,c4 from ${tableName5} order by k1,c1,c2,c3,c4;" - // if the table has sequence type col, users must set sequence col even if merge_type=delete - streamLoad { - table "${tableName5}" - set 'column_separator', ',' - set 'format', 'csv' - set 'columns', 'k1' - set 'partial_columns', 'true' - set 'merge_type', 'DELETE' - file 'partial_update_delete.csv' - time 10000 - check { result, exception, startTime, endTime -> - if (exception != null) { - throw exception - } - log.info("Stream load result: ${result}".toString()) - def json = parseJson(result) - assertEquals("fail", json.Status.toLowerCase()) - assertTrue(json.Message.contains('need to specify the sequence column')) + sql "insert into ${tableName5}(k1,c1,c2,c3,c4,__DORIS_SEQUENCE_COL__) values(1,1,1,1,1,1),(2,2,2,2,2,2),(3,3,3,3,3,3),(4,4,4,4,4,4),(5,5,5,5,5,5),(6,6,6,6,6,6);" + qt_sql4 "select k1,c1,c2,c3,c4 from ${tableName5} order by k1,c1,c2,c3,c4;" + // if the table has sequence type col, users must set sequence col even if merge_type=delete + streamLoad { + table "${tableName5}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k1' + set 'partial_columns', 'true' + set 'merge_type', 'DELETE' + file 'partial_update_delete.csv' + time 10000 + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("fail", json.Status.toLowerCase()) + assertTrue(json.Message.contains('need to specify the sequence column')) } - sql "drop table if exists ${tableName5};" } + sql "drop table if exists ${tableName5};" } } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy index 6c15c9562d9c91..3388b78eb72dc9 100644 --- 
a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_light_schema_change.groovy @@ -264,11 +264,11 @@ suite("test_partial_update_insert_light_schema_change", "p0") { sql "sync" // test insert data with all key column, should fail because - // it don't have any value columns + // it inserts a new row in strict mode sql "set enable_unique_key_partial_update=true;" test { sql "insert into ${tableName}(c0,c1) values(1, 1);" - exception "INTERNAL_ERROR" + exception "Insert has filtered data in strict mode" } sql "insert into ${tableName}(c0,c1,c2) values(1,0,10);" sql "set enable_unique_key_partial_update=false;" diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy index b648243bb78f8a..85b4328b76f76b 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_insert_schema_change.groovy @@ -249,11 +249,11 @@ suite("test_partial_update_insert_schema_change", "p0") { sql "sync" // test insert data with all key column, should fail because - // it don't have any value columns + // it inserts a new row in strict mode sql "set enable_unique_key_partial_update=true;" test { sql "insert into ${tableName}(c0,c1) values(1, 1);" - exception "INTERNAL_ERROR" + exception "Insert has filtered data in strict mode" } sql "insert into ${tableName}(c0,c1,c2) values(1,0,10);" sql "set enable_unique_key_partial_update=false;" diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_merge_type.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_merge_type.groovy new file mode 100644 index 00000000000000..ece523838b0076 --- /dev/null +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_merge_type.groovy @@ -0,0 +1,185 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_partial_update_merge_type", "p0") { + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + def inspect_rows = { sqlStr -> + sql "set skip_delete_sign=true;" + sql "set skip_delete_bitmap=true;" + sql "sync" + qt_inspect sqlStr + sql "set skip_delete_sign=false;" + sql "set skip_delete_bitmap=false;" + sql "sync" + } + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + + def tableName = "test_partial_update_merge_type" + sql """ DROP TABLE IF EXISTS ${tableName} force""" + sql """ CREATE TABLE ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` int, + `c3` int) + UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName} select number,number,number,number from numbers("number"="9");""" + sql "sync" + qt_sql """select * from ${tableName} order by k;""" + // 1.1 merge_type=MERGE, no sequence col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,c2,del' + set 'partial_columns', 'true' + set 'merge_type', 'MERGE' + set 'delete', 'del=1' + file 'merge1.csv' + time 10000 + } + qt_sql_1_1 """select * from ${tableName} order by k;""" + // 1.2 merge_type=MERGE, no sequence col, no value col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,del' + set 'partial_columns', 'true' + set 'merge_type', 'MERGE' + set 'delete', 'del=1' + file 'merge5.csv' + time 10000 + } + qt_sql_1_2 """select * from ${tableName} order by k;""" + // 2.1 merge_type=DELETE, no sequence col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,c1' + set 'partial_columns', 'true' + set 'merge_type', 'DELETE' + file 'merge2.csv' + time 10000 + } + qt_sql_2_1 """select * from ${tableName} order by k;""" + // 2.2 merge_type=DELETE, no sequence col, no value col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k' + set 'partial_columns', 'true' + set 'merge_type', 'DELETE' + file 'merge6.csv' + time 10000 + } + qt_sql_2_2 """select * from ${tableName} order by k;""" + + + tableName = "test_partial_update_merge_type2" + sql """ DROP TABLE IF EXISTS ${tableName} force""" + sql """ CREATE TABLE ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` int, + `c3` int) + UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_type" = "BIGINT", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName}(k,c1,c2,c3,__DORIS_SEQUENCE_COL__) select number,number,number,number,1 from numbers("number"="9");""" + qt_sql """select * from ${tableName} order by k;""" + // 3.1 merge_type=MERGE, has sequence type col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,c2,seq,del' + set 'partial_columns', 'true' + set 'function_column.sequence_col', 'seq' + set 'merge_type', 'MERGE' + set 'delete', 'del=1' + file 'merge3.csv' + time 10000 + } + qt_sql_3_1 """select * from ${tableName} order by k;""" + inspect_rows 
"""select k,c1,c2,c3,__DORIS_SEQUENCE_COL__,__DORIS_DELETE_SIGN__ from ${tableName} order by k,__DORIS_SEQUENCE_COL__;""" + // 3.2 merge_type=MERGE, has sequence type col, no value col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,seq,del' + set 'partial_columns', 'true' + set 'function_column.sequence_col', 'seq' + set 'merge_type', 'MERGE' + set 'delete', 'del=1' + file 'merge7.csv' + time 10000 + } + qt_sql_3_2 """select * from ${tableName} order by k;""" + inspect_rows """select k,c1,c2,c3,__DORIS_SEQUENCE_COL__,__DORIS_DELETE_SIGN__ from ${tableName} order by k,__DORIS_SEQUENCE_COL__;""" + + // 4.1 merge_type=DELETE, has sequence type col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,c2,seq' + set 'partial_columns', 'true' + set 'function_column.sequence_col', 'seq' + set 'merge_type', 'DELETE' + file 'merge4.csv' + time 10000 + } + qt_sql_4_1 """select * from ${tableName} order by k;""" + inspect_rows """select k,c1,c2,c3,__DORIS_SEQUENCE_COL__,__DORIS_DELETE_SIGN__ from ${tableName} order by k,__DORIS_SEQUENCE_COL__;""" + // 4.2 merge_type=DELETE, has sequence type col, no value col + streamLoad { + table "${tableName}" + set 'column_separator', ',' + set 'format', 'csv' + set 'columns', 'k,seq' + set 'partial_columns', 'true' + set 'function_column.sequence_col', 'seq' + set 'merge_type', 'DELETE' + file 'merge8.csv' + time 10000 + } + qt_sql_4_2 """select * from ${tableName} order by k;""" + inspect_rows """select k,c1,c2,c3,__DORIS_SEQUENCE_COL__,__DORIS_DELETE_SIGN__ from ${tableName} order by k,__DORIS_SEQUENCE_COL__;""" + + sql """ DROP TABLE IF EXISTS ${tableName}; """ + } + } +} diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_mow_with_sync_mv.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_mow_with_sync_mv.groovy new file mode 100644 index 00000000000000..972542dcd34e41 --- /dev/null +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_mow_with_sync_mv.groovy @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_partial_update_mow_with_sync_mv") { + + sql """drop table if exists test_partial_update_mow_with_sync_mv""" + + sql """ + CREATE TABLE `test_partial_update_mow_with_sync_mv` ( + `l_orderkey` BIGINT NULL, + `l_linenumber` INT NULL, + `l_partkey` INT NULL, + `l_suppkey` INT NULL, + `l_shipdate` DATE not NULL, + `l_quantity` DECIMAL(15, 2) NULL, + `l_extendedprice` DECIMAL(15, 2) NULL, + `l_discount` DECIMAL(15, 2) NULL, + `l_tax` DECIMAL(15, 2) NULL, + `l_returnflag` VARCHAR(1) NULL, + `l_linestatus` VARCHAR(1) NULL, + `l_commitdate` DATE NULL, + `l_receiptdate` DATE NULL, + `l_shipinstruct` VARCHAR(25) NULL, + `l_shipmode` VARCHAR(10) NULL, + `l_comment` VARCHAR(44) NULL + ) + unique KEY(l_orderkey, l_linenumber, l_partkey, l_suppkey, l_shipdate) + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true" + ); + """ + + sql """ + insert into test_partial_update_mow_with_sync_mv values + (null, 1, 2, 3, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy'), + (1, null, 3, 1, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy'), + (3, 3, null, 2, '2023-10-19', 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx'), + (1, 2, 3, null, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy'), + (2, 3, 2, 1, '2023-10-18', 5.5, 6.5, 7.5, 8.5, 'o', 'k', null, '2023-10-18', 'a', 'b', 'yyyyyyyyy'), + (3, 1, 1, 2, '2023-10-19', 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', null, 'c', 'd', 'xxxxxxxxx'), + (1, 3, 2, 2, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy'), + (null, 1, 2, 3, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy'), + (1, null, 3, 1, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy'), + (3, 3, null, 2, '2023-10-19', 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx'), + (1, 2, 3, null, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy'), + (2, 3, 2, 1, '2023-10-18', 5.5, 6.5, 7.5, 8.5, 'o', 'k', null, '2023-10-18', 'a', 'b', 'yyyyyyyyy'), + (3, 1, 1, 2, '2023-10-19', 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', null, 'c', 'd', 'xxxxxxxxx'), + (1, 3, 2, 2, '2023-10-17', 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy') + """ + + createMV (""" + CREATE MATERIALIZED VIEW mv + AS + select l_orderkey, l_linenumber, l_partkey, l_suppkey, l_shipdate, + substring(concat(l_returnflag, l_linestatus), 1) + from test_partial_update_mow_with_sync_mv; + """) + + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + + test { + sql """insert into test_partial_update_mow_with_sync_mv(l_orderkey, l_linenumber, l_partkey, l_suppkey, l_shipdate, l_returnflag) values + (2, 3, 2, 1, '2023-10-18', 'k'); """ + exception "Can't do partial update on merge-on-write Unique table with sync materialized view." 
+ } + + streamLoad { + table "test_partial_update_mow_with_sync_mv" + set 'column_separator', ',' + set 'format', 'csv' + set 'partial_columns', 'true' + set 'columns', 'l_orderkey, l_linenumber, l_partkey, l_suppkey, l_shipdate, l_returnflag' + + file 'test_partial_update_mow_with_sync_mv.csv' + time 10000 + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("fail", json.Status.toLowerCase()) + assertTrue(json.Message.contains("Can't do partial update on merge-on-write Unique table with sync materialized view.")) + } + } +} diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy index 74fd2162cd916b..457961e2fec60c 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_native_insert_stmt.groovy @@ -246,23 +246,13 @@ suite("test_partial_update_native_insert_stmt", "p0") { } // test that session variable `enable_unique_key_partial_update` will only affect unique tables - for (def use_nerieds : [true, false]) { - logger.info("current params: use_nerieds: ${use_nerieds}") - if (use_nerieds) { - sql "set enable_nereids_planner=true;" - sql "set enable_fallback_to_original_planner=false;" - sql "sync;" - } else { - sql "set enable_nereids_planner=false;" - sql "sync;" - } + sql "sync;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" - sql "set enable_unique_key_partial_update=true;" - sql "sync;" - - def tableName8 = "test_partial_update_native_insert_stmt_agg_${use_nerieds}" - sql """ DROP TABLE IF EXISTS ${tableName8}; """ - sql """ CREATE TABLE IF NOT EXISTS ${tableName8} ( + def tableName8 = "test_partial_update_native_insert_stmt_agg" + sql """ DROP TABLE IF EXISTS ${tableName8}; """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName8} ( `user_id` LARGEINT NOT NULL, `date` DATE NOT NULL, `timestamp` DATETIME NOT NULL, @@ -277,15 +267,15 @@ suite("test_partial_update_native_insert_stmt", "p0") { DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 PROPERTIES ("replication_allocation" = "tag.location.default: 1");""" - sql """insert into ${tableName8} values + sql """insert into ${tableName8} values (10000,"2017-10-01","2017-10-01 08:00:05","北京",20,0,"2017-10-01 06:00:00",20,10,10), (10000,"2017-10-01","2017-10-01 09:00:05","北京",20,0,"2017-10-01 07:00:00",15,2,2); """ - qt_sql "select * from ${tableName8} order by user_id;" + qt_sql "select * from ${tableName8} order by user_id;" - def tableName9 = "test_partial_update_native_insert_stmt_dup_${use_nerieds}" - sql """ DROP TABLE IF EXISTS ${tableName9}; """ - sql """ CREATE TABLE IF NOT EXISTS ${tableName9} ( + def tableName9 = "test_partial_update_native_insert_stmt_dup" + sql """ DROP TABLE IF EXISTS ${tableName9}; """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName9} ( `user_id` LARGEINT NOT NULL, `date` DATE NOT NULL, `timestamp` DATETIME NOT NULL, @@ -300,15 +290,15 @@ suite("test_partial_update_native_insert_stmt", "p0") { DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 PROPERTIES ("replication_allocation" = "tag.location.default: 1");""" - sql """insert into ${tableName9} values + sql """insert into ${tableName9} values (10000,"2017-10-01","2017-10-01 08:00:05","北京",20,0,"2017-10-01 
06:00:00",20,10,10), (10000,"2017-10-01","2017-10-01 09:00:05","北京",20,0,"2017-10-01 07:00:00",15,2,2); """ - qt_sql "select * from ${tableName9} order by user_id;" + qt_sql "select * from ${tableName9} order by user_id;" - def tableName10 = "test_partial_update_native_insert_stmt_mor_${use_nerieds}" - sql """ DROP TABLE IF EXISTS ${tableName10}; """ - sql """ CREATE TABLE IF NOT EXISTS ${tableName10} ( + def tableName10 = "test_partial_update_native_insert_stmt_mor" + sql """ DROP TABLE IF EXISTS ${tableName10}; """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName10} ( `user_id` LARGEINT NOT NULL, `date` DATE NOT NULL, `timestamp` DATETIME NOT NULL, @@ -323,13 +313,12 @@ suite("test_partial_update_native_insert_stmt", "p0") { DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 PROPERTIES ("replication_allocation" = "tag.location.default: 1", "enable_unique_key_merge_on_write" = "false");""" - sql """insert into ${tableName10} values + sql """insert into ${tableName10} values (10000,"2017-10-01","2017-10-01 08:00:05","北京",20,0,"2017-10-01 06:00:00",20,10,10), (10000,"2017-10-01","2017-10-01 09:00:05","北京",20,0,"2017-10-01 07:00:00",15,2,2); """ - qt_sql "select * from ${tableName10} order by user_id;" + qt_sql "select * from ${tableName10} order by user_id;" - sql """ DROP TABLE IF EXISTS ${tableName8}; """ - sql """ DROP TABLE IF EXISTS ${tableName9}; """ - sql """ DROP TABLE IF EXISTS ${tableName10}; """ - } + sql """ DROP TABLE IF EXISTS ${tableName8}; """ + sql """ DROP TABLE IF EXISTS ${tableName9}; """ + sql """ DROP TABLE IF EXISTS ${tableName10}; """ } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_only_keys.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_only_keys.groovy new file mode 100644 index 00000000000000..29f1257f9cb4ad --- /dev/null +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_only_keys.groovy @@ -0,0 +1,62 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_partial_update_only_keys", "p0") { + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect(user = context.config.jdbcUser, password = context.config.jdbcPassword, url = context.config.jdbcUrl) { + sql "use ${db};" + def tableName = "test_partial_update_only_keys" + sql """ DROP TABLE IF EXISTS ${tableName} force""" + sql """ CREATE TABLE ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` int, + `c3` int) + UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + sql """insert into ${tableName} select number,number,number,number from numbers("number"="3");""" + qt_sql """select * from ${tableName} order by k;""" + // new rows will be appended + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync" + sql "insert into ${tableName}(k) values(0),(1),(4),(5),(6);" + qt_sql """select * from ${tableName} order by k;""" + + // fail if has new rows + sql "set enable_insert_strict=true;" + sql "sync" + sql "insert into ${tableName}(k) values(0),(1),(4),(5),(6);" + qt_sql """select * from ${tableName} order by k;""" + test { + sql "insert into ${tableName}(k) values(0),(1),(10),(11);" + exception "Insert has filtered data in strict mode" + } + qt_sql """select * from ${tableName} order by k;""" + } + } +} diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy index 75fcaff36ad440..914244d1fa4b35 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change.groovy @@ -415,12 +415,13 @@ suite("test_partial_update_schema_change", "p0") { }); // test load data with all key column, should fail because - // it don't have any value columns + // it inserts a new row in strict mode streamLoad { table "${tableName}" set 'column_separator', ',' set 'partial_columns', 'true' + set 'strict_mode', 'true' set 'columns', 'c0, c1' file 'schema_change/load_with_key_column.csv' @@ -434,7 +435,7 @@ suite("test_partial_update_schema_change", "p0") { def json = parseJson(result) assertEquals("fail", json.Status.toLowerCase()) assertEquals(1, json.NumberTotalRows) - assertEquals(0, json.NumberFilteredRows) + assertEquals(1, json.NumberFilteredRows) assertEquals(0, json.NumberUnselectedRows) } } @@ -1001,6 +1002,7 @@ suite("test_partial_update_schema_change", "p0") { set 'column_separator', ',' set 'partial_columns', 'true' + set 'strict_mode', 'true' set 'columns', 'c0, c1' file 'schema_change/load_with_key_column.csv' @@ -1014,7 +1016,7 @@ suite("test_partial_update_schema_change", "p0") { def json = parseJson(result) assertEquals("fail", json.Status.toLowerCase()) assertEquals(1, json.NumberTotalRows) - assertEquals(0, json.NumberFilteredRows) + assertEquals(1, json.NumberFilteredRows) assertEquals(0, json.NumberUnselectedRows) } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy index 
5fba5367a01bf4..54e51591725c2e 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_schema_change_row_store.groovy @@ -419,12 +419,13 @@ suite("test_partial_update_row_store_schema_change", "p0") { }); // test load data with all key column, should fail because - // it don't have any value columns + // it inserts a new row in strict mode streamLoad { table "${tableName}" set 'column_separator', ',' set 'partial_columns', 'true' + set 'strict_mode', 'true' set 'columns', 'c0, c1' file 'schema_change/load_with_key_column.csv' @@ -438,7 +439,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { def json = parseJson(result) assertEquals("fail", json.Status.toLowerCase()) assertEquals(1, json.NumberTotalRows) - assertEquals(0, json.NumberFilteredRows) + assertEquals(1, json.NumberFilteredRows) assertEquals(0, json.NumberUnselectedRows) } } @@ -1014,6 +1015,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { set 'column_separator', ',' set 'partial_columns', 'true' + set 'strict_mode', 'true' set 'columns', 'c0, c1' file 'schema_change/load_with_key_column.csv' @@ -1027,7 +1029,7 @@ suite("test_partial_update_row_store_schema_change", "p0") { def json = parseJson(result) assertEquals("fail", json.Status.toLowerCase()) assertEquals(1, json.NumberTotalRows) - assertEquals(0, json.NumberFilteredRows) + assertEquals(1, json.NumberFilteredRows) assertEquals(0, json.NumberUnselectedRows) } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_map_col.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_map_col.groovy new file mode 100644 index 00000000000000..6ebddc4fe93207 --- /dev/null +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_seq_map_col.groovy @@ -0,0 +1,146 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_partial_update_seq_map_col", "p0") { + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + sql "set enable_insert_strict=false;" + sql "set enable_unique_key_partial_update=true;" + sql "sync;" + + def tableName = "test_partial_update_seq_map_col1" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` datetime(6) null default current_timestamp(6), + c3 int, + c4 int, + c5 int, + c6 int + ) UNIQUE KEY(`k`) + DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "c2", + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName}(k,c1) values(1,1);" + sql "insert into ${tableName}(k,c1) values(2,2);" + sql "insert into ${tableName}(k,c1) values(3,3);" + sql "insert into ${tableName}(k,c1) values(4,4);" + order_qt_sql1 "select k,c1,c3 from ${tableName} where c2=__DORIS_SEQUENCE_COL__;" + // update column which is not sequence map col + explain { + sql "update ${tableName} set c3=20 where c1<=2;" + contains "IS_PARTIAL_UPDATE: false" + } + sql "update ${tableName} set c3=20 where c1<=2;" + order_qt_sql1 "select k,c1,c3 from ${tableName} where c2=__DORIS_SEQUENCE_COL__;" + // update sequence map col + explain { + sql "update ${tableName} set c2='2099-09-10 12:00:00.977174' where k>2;" + contains "IS_PARTIAL_UPDATE: false" + } + sql "update ${tableName} set c2='2099-09-10 12:00:00.977174' where k>2;" + order_qt_sql1 "select k,c1,c3 from ${tableName} where c2=__DORIS_SEQUENCE_COL__;" + order_qt_sql1 "select k,c1,c2,c3,__DORIS_SEQUENCE_COL__ from ${tableName} where c1>2;" + + tableName = "test_partial_update_seq_map_col2" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` datetime not null default current_timestamp, + ) UNIQUE KEY(`k`) + DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "c2", + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName}(k,c1) values(1,1);" + sql "insert into ${tableName}(k,c1) values(2,2);" + sql "insert into ${tableName}(k,c1) values(3,3);" + sql "insert into ${tableName}(k,c1) values(4,4);" + order_qt_sql2 "select k,c1 from ${tableName} where c2=__DORIS_SEQUENCE_COL__;" + + + tableName = "test_partial_update_seq_map_col3" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` int not null default "999", + ) UNIQUE KEY(`k`) + DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "c2", + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName}(k,c1) values(1,1);" + sql "insert into ${tableName}(k,c1) values(2,2);" + sql "insert into ${tableName}(k,c1) values(3,3);" + sql "insert into ${tableName}(k,c1) values(4,4);" + order_qt_sql3 "select k,c1,c2,__DORIS_SEQUENCE_COL__ from ${tableName};" + sql "insert into ${tableName}(k,c1,c2) values(1,99,8888);" + sql "insert into ${tableName}(k,c1,c2) values(2,99,8888);" + sql "insert into ${tableName}(k,c1,c2) values(4,99,77);" + sql 
"insert into ${tableName}(k,c1,c2) values(5,99,8888);" + order_qt_sql3 "select k,c1,c2,__DORIS_SEQUENCE_COL__ from ${tableName}" + + + tableName = "test_partial_update_seq_map_col4" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` int null, + ) UNIQUE KEY(`k`) + DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "c2", + "store_row_column" = "${use_row_store}"); """ + sql "insert into ${tableName}(k,c1) values(1,1);" + sql "insert into ${tableName}(k,c1) values(2,2);" + sql "insert into ${tableName}(k,c1) values(3,3);" + sql "insert into ${tableName}(k,c1) values(4,4);" + order_qt_sql4 "select k,c1,c2,__DORIS_SEQUENCE_COL__ from ${tableName};" + + + tableName = "test_partial_update_seq_map_col5" + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( + `k` BIGINT NOT NULL, + `c1` int, + `c2` int not null + ) UNIQUE KEY(`k`) + DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true", + "function_column.sequence_col" = "c2", + "store_row_column" = "${use_row_store}"); """ + test { + sql "insert into ${tableName}(k,c1) values(1,1);" + exception "the unmentioned column `c2` should have default value or be nullable for newly inserted rows in non-strict mode partial update" + } + } +} diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_create.sql index d6566d3f1b35c3..1b5045e6a47732 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_create.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS `customer` ( UNIQUE KEY (`c_custkey`) DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql index b756ad271b8a9d..1fd98ea6391681 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/customer_sequence_create.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS `customer` ( UNIQUE KEY (`c_custkey`) DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_col" = 'c_custkey', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_create.sql index d6ad38d8bd83f5..f1e3b4f72dbb0d 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_create.sql @@ -20,6 +20,7 @@ CREATE TABLE IF NOT EXISTS `date` ( UNIQUE KEY (`d_datekey`) DISTRIBUTED BY HASH(`d_datekey`) BUCKETS 1 PROPERTIES ( +"enable_mow_light_delete" = "true", 
"compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql index b450b872650c2b..317bedfbd953d7 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/date_sequence_create.sql @@ -20,6 +20,7 @@ CREATE TABLE IF NOT EXISTS `date` ( UNIQUE KEY (`d_datekey`) DISTRIBUTED BY HASH(`d_datekey`) BUCKETS 1 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_col" = 'd_datekey', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql index 44bbfaf804c1d9..5b7ad6d45bcfbb 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_create.sql @@ -28,6 +28,7 @@ PARTITION p1997 VALUES [("19970101"), ("19980101")), PARTITION p1998 VALUES [("19980101"), ("19990101"))) DISTRIBUTED BY HASH(`lo_orderkey`) BUCKETS 48 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql index 230c4fde44d5dc..fbb869fabb200f 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/lineorder_sequence_create.sql @@ -28,6 +28,7 @@ PARTITION p1997 VALUES [("19970101"), ("19980101")), PARTITION p1998 VALUES [("19980101"), ("19990101"))) DISTRIBUTED BY HASH(`lo_orderkey`) BUCKETS 48 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_col" = 'lo_orderkey', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_create.sql index 0c6f6e371aa979..8d03167f909f6c 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_create.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS `part` ( UNIQUE KEY (`p_partkey`) DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql index a06acd3d9f8b08..fe6c511531bd56 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/part_sequence_create.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS `part` ( UNIQUE KEY (`p_partkey`) DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 10 PROPERTIES ( 
+"enable_mow_light_delete" = "true", "function_column.sequence_col" = 'p_partkey', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_create.sql index b021767b47c160..dd146b460c2410 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_create.sql @@ -10,6 +10,7 @@ CREATE TABLE IF NOT EXISTS `supplier` ( UNIQUE KEY (`s_suppkey`) DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql index e88dde59acd588..19255397170e98 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_load_zstd/ddl/supplier_sequence_create.sql @@ -10,6 +10,7 @@ CREATE TABLE IF NOT EXISTS `supplier` ( UNIQUE KEY (`s_suppkey`) DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "function_column.sequence_col" = 's_suppkey', "compression"="zstd", "replication_num" = "1", diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/customer_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/customer_create.sql index d6566d3f1b35c3..1b5045e6a47732 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/customer_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/customer_create.sql @@ -11,6 +11,7 @@ CREATE TABLE IF NOT EXISTS `customer` ( UNIQUE KEY (`c_custkey`) DISTRIBUTED BY HASH(`c_custkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/date_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/date_create.sql index d6ad38d8bd83f5..f1e3b4f72dbb0d 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/date_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/date_create.sql @@ -20,6 +20,7 @@ CREATE TABLE IF NOT EXISTS `date` ( UNIQUE KEY (`d_datekey`) DISTRIBUTED BY HASH(`d_datekey`) BUCKETS 1 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql index 44bbfaf804c1d9..5b7ad6d45bcfbb 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/lineorder_create.sql @@ -28,6 +28,7 @@ PARTITION p1997 VALUES [("19970101"), ("19980101")), PARTITION p1998 VALUES [("19980101"), ("19990101"))) DISTRIBUTED BY HASH(`lo_orderkey`) BUCKETS 48 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", 
"replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/part_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/part_create.sql index 0c6f6e371aa979..8d03167f909f6c 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/part_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/part_create.sql @@ -12,6 +12,7 @@ CREATE TABLE IF NOT EXISTS `part` ( UNIQUE KEY (`p_partkey`) DISTRIBUTED BY HASH(`p_partkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql index b021767b47c160..dd146b460c2410 100644 --- a/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql +++ b/regression-test/suites/unique_with_mow_p2/ssb_unique_sql_zstd/ddl/supplier_create.sql @@ -10,6 +10,7 @@ CREATE TABLE IF NOT EXISTS `supplier` ( UNIQUE KEY (`s_suppkey`) DISTRIBUTED BY HASH(`s_suppkey`) BUCKETS 10 PROPERTIES ( +"enable_mow_light_delete" = "true", "compression"="zstd", "replication_num" = "1", "enable_unique_key_merge_on_write" = "true" diff --git a/regression-test/suites/update/test_unique_table_update.groovy b/regression-test/suites/update/test_unique_table_update.groovy index d9fb22c995cf4c..ac8698e1556d94 100644 --- a/regression-test/suites/update/test_unique_table_update.groovy +++ b/regression-test/suites/update/test_unique_table_update.groovy @@ -60,27 +60,4 @@ suite("test_unique_table_update","nonConcurrent") { } finally { qt_select_3 "select * from ${tableName} order by k;" } - - // test legacy planner - sql "set enable_nereids_planner=false" - // update key is not allowed - try { - sql "update ${tableName} set k=1, v1=1, v2=1 where k=2;" - assertTrue(false) - } catch (Exception e) { - logger.info(e.getMessage()) - assertTrue(e.getMessage().contains("Only value columns of unique table could be updated")) - } finally { - qt_select_4 "select * from ${tableName} order by k;" - } - - // update key is allowed - try { - sql "update ${tableName} set v1=1, v2=1 where k=2;" - } catch (Exception e) { - logger.info(e.getMessage()) - assertTrue(false) - } finally { - qt_select_5 "select * from ${tableName} order by k;" - } } diff --git a/regression-test/suites/update/test_update_mow.groovy b/regression-test/suites/update/test_update_mow.groovy index c5ca00590e6d18..9d7500c0d777ef 100644 --- a/regression-test/suites/update/test_update_mow.groovy +++ b/regression-test/suites/update/test_update_mow.groovy @@ -98,66 +98,4 @@ suite("test_update_mow", "p0") { sql "DROP TABLE IF EXISTS ${tbName2}" sql "DROP TABLE IF EXISTS ${tbName3}" sql "DROP TABLE IF EXISTS ${tbName4}" - - - // test legacy planner - sql "set enable_nereids_planner=false" - sql "sync" - def tableName5 = "test_update_mow_5" - sql "DROP TABLE IF EXISTS ${tableName5}" - sql """ CREATE TABLE ${tableName5} ( - k1 varchar(100) NOT NULL, - k2 int(11) NOT NULL, - v1 datetime NULL, - v2 varchar(100) NULL, - v3 int NULL) ENGINE=OLAP UNIQUE KEY(k1, k2) COMMENT 'OLAP' - DISTRIBUTED BY HASH(k1, k2) BUCKETS 3 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "store_row_column" = "true", 
- "enable_single_replica_compaction" = "false");""" - sql """insert into ${tableName5} values - ("a",1,"2023-11-12 00:00:00","test1",1), - ("b",2,"2023-11-12 00:00:00","test2",2), - ("c",3,"2023-11-12 00:00:00","test3",3);""" - qt_sql "select * from ${tableName5} order by k1,k2" - sql """update ${tableName5} set v3=999 where k1="a" and k2=1;""" - qt_sql "select * from ${tableName5} order by k1,k2" - sql """update ${tableName5} set v2="update value", v1="2022-01-01 00:00:00" where k1="c" and k2=3;""" - qt_sql "select * from ${tableName5} order by k1,k2" - - sql "DROP TABLE IF EXISTS ${tableName5}" - - // test nereids planner - sql "set enable_nereids_planner=true" - sql "set enable_fallback_to_original_planner=false" - sql "sync" - def tableName6 = "test_update_mow_6" - sql "DROP TABLE IF EXISTS ${tableName6}" - sql """ CREATE TABLE ${tableName6} ( - k1 varchar(100) NOT NULL, - k2 int(11) NOT NULL, - v1 datetime NULL, - v2 varchar(100) NULL, - v3 int NULL) ENGINE=OLAP UNIQUE KEY(k1, k2) COMMENT 'OLAP' - DISTRIBUTED BY HASH(k1, k2) BUCKETS 3 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "enable_unique_key_merge_on_write" = "true", - "light_schema_change" = "true", - "store_row_column" = "true", - "enable_single_replica_compaction" = "false");""" - sql """insert into ${tableName6} values - ("a",1,"2023-11-12 00:00:00","test1",1), - ("b",2,"2023-11-12 00:00:00","test2",2), - ("c",3,"2023-11-12 00:00:00","test3",3);""" - qt_sql "select * from ${tableName6} order by k1,k2" - sql """update ${tableName6} set v3=999 where k1="a" and k2=1;""" - qt_sql "select * from ${tableName6} order by k1,k2" - sql """update ${tableName6} set v2="update value", v1="2022-01-01 00:00:00" where k1="c" and k2=3;""" - qt_sql "select * from ${tableName6} order by k1,k2" - - sql "DROP TABLE IF EXISTS ${tableName6}" } diff --git a/regression-test/suites/variant_github_events_p2/load.groovy b/regression-test/suites/variant_github_events_p2/load.groovy index e1742231afc9ab..8e6c05ad3e91d1 100644 --- a/regression-test/suites/variant_github_events_p2/load.groovy +++ b/regression-test/suites/variant_github_events_p2/load.groovy @@ -215,6 +215,7 @@ suite("regression_test_variant_github_events_p2", "nonConcurrent,p2"){ } sql """set enable_match_without_inverted_index = false""" + sql """ set enable_common_expr_pushdown = true """ // filter by bloom filter qt_sql """select cast(v["payload"]["pull_request"]["additions"] as int) from github_events where cast(v["repo"]["name"] as string) = 'xpressengine/xe-core' order by 1;""" qt_sql """select * from github_events where cast(v["repo"]["name"] as string) = 'xpressengine/xe-core' order by 1 limit 10""" diff --git a/regression-test/suites/variant_p0/column_name.groovy b/regression-test/suites/variant_p0/column_name.groovy index 26520aafa502d5..7962112ff75f4d 100644 --- a/regression-test/suites/variant_p0/column_name.groovy +++ b/regression-test/suites/variant_p0/column_name.groovy @@ -28,8 +28,6 @@ suite("regression_test_variant_column_name", "variant_type"){ properties("replication_num" = "1", "disable_auto_compaction" = "true"); """ - // sql "set experimental_enable_nereids_planner = false" - sql """insert into ${table_name} values (1, '{"中文" : "中文", "\\\u4E2C\\\u6587": "unicode"}')""" qt_sql """select v['中文'], v['\\\u4E2C\\\u6587'] from ${table_name}""" // sql """insert into ${table_name} values (2, '{}')""" diff --git a/regression-test/suites/variant_p0/load.groovy b/regression-test/suites/variant_p0/load.groovy index 65a5a838c5038e..8f4b9a3cee53fd 
100644 --- a/regression-test/suites/variant_p0/load.groovy +++ b/regression-test/suites/variant_p0/load.groovy @@ -446,6 +446,22 @@ suite("regression_test_variant", "p0"){ exception("Invalid type for variant column: 36") } + test { + sql """ + create table var( + `key` int, + `content` variant + ) + DUPLICATE KEY(`key`) + distributed by hash(`key`) buckets 8 + properties( + "replication_allocation" = "tag.location.default: 1", + "light_schema_change" = "false" + ); + """ + exception("errCode = 2, detailMessage = Variant type rely on light schema change") + } + } finally { // reset flags } diff --git a/regression-test/suites/variant_p0/nested.groovy b/regression-test/suites/variant_p0/nested.groovy index a2a3355854f7c6..90728df2532668 100644 --- a/regression-test/suites/variant_p0/nested.groovy +++ b/regression-test/suites/variant_p0/nested.groovy @@ -162,7 +162,7 @@ parallel_pipeline_task_num=7,parallel_fragment_exec_instance_num=4,profile_level properties("replication_num" = "1", "disable_auto_compaction" = "false", "enable_unique_key_merge_on_write" = "true", "variant_enable_flatten_nested" = "true"); """ sql """insert into var_nested2 select * from var_nested order by k limit 1024""" - qt_sql """select * from var_nested2 order by k limit 10;""" + qt_sql """select /*+SET_VAR(batch_size=4064,broker_load_batch_size=16352,disable_streaming_preaggregations=true,enable_distinct_streaming_aggregation=true,parallel_fragment_exec_instance_num=5,parallel_pipeline_task_num=1,profile_level=1,enable_pipeline_engine=false,enable_parallel_scan=true,parallel_scan_max_scanners_count=48,parallel_scan_min_rows_per_scanner=16384,enable_fold_constant_by_be=true,enable_rewrite_element_at_to_slot=true,runtime_filter_type=12,enable_parallel_result_sink=false,enable_nereids_planner=true,rewrite_or_to_in_predicate_threshold=2,enable_function_pushdown=true,enable_common_expr_pushdown=false,enable_local_exchange=false,partitioned_hash_join_rows_threshold=1048576,partitioned_hash_agg_rows_threshold=8,partition_pruning_expand_threshold=10,enable_share_hash_table_for_broadcast_join=false,enable_two_phase_read_opt=true,enable_common_expr_pushdown_for_inverted_index=true,enable_delete_sub_predicate_v2=true,min_revocable_mem=33554432,fetch_remote_schema_timeout_seconds=120,max_fetch_remote_schema_tablet_count=512,enable_join_spill=false,enable_sort_spill=false,enable_agg_spill=false,enable_force_spill=false,data_queue_max_blocks=1,spill_streaming_agg_mem_limit=268435456,external_agg_partition_bits=5) */ * from var_nested2 order by k limit 10;""" qt_sql """select v['nested'] from var_nested2 where k < 10 order by k limit 10;""" // explode variant array order_qt_explode_sql """select count(),cast(vv['xx'] as int) from var_nested lateral view explode_variant_array(v['nested']) tmp as vv where vv['xx'] = 10 group by cast(vv['xx'] as int)""" diff --git a/regression-test/suites/variant_p0/variant_with_rowstore.groovy b/regression-test/suites/variant_p0/variant_with_rowstore.groovy index f23a742249ea84..69957c25859da3 100644 --- a/regression-test/suites/variant_p0/variant_with_rowstore.groovy +++ b/regression-test/suites/variant_p0/variant_with_rowstore.groovy @@ -39,7 +39,6 @@ suite("regression_test_variant_rowstore", "variant_type"){ DISTRIBUTED BY HASH(k) BUCKETS 1 properties("replication_num" = "1", "disable_auto_compaction" = "false", "store_row_column" = "true"); """ - sql "set experimental_enable_nereids_planner = false" sql "sync" sql """insert into ${table_name} values (-3, '{"a" : 1, "b" : 1.5, "c" : [1, 2, 3]}')""" 
sql """insert into ${table_name} select -2, '{"a": 11245, "b" : [123, {"xx" : 1}], "c" : {"c" : 456, "d" : "null", "e" : 7.111}}' as json_str diff --git a/regression-test/suites/view_p0/create_view_star_except_and_cast_to_sql.groovy b/regression-test/suites/view_p0/create_view_star_except_and_cast_to_sql.groovy index 7b047136e33617..ce490063bcf128 100644 --- a/regression-test/suites/view_p0/create_view_star_except_and_cast_to_sql.groovy +++ b/regression-test/suites/view_p0/create_view_star_except_and_cast_to_sql.groovy @@ -16,7 +16,6 @@ // under the License. suite("create_view_star_except_and_cast_to_sql") { - sql "SET enable_nereids_planner=false;" sql """ DROP TABLE IF EXISTS mal_old_create_view """ diff --git a/regression-test/suites/view_p0/view_p0.groovy b/regression-test/suites/view_p0/view_p0.groovy index d6ae3a9313a83f..7ee8c9e913ccc4 100644 --- a/regression-test/suites/view_p0/view_p0.groovy +++ b/regression-test/suites/view_p0/view_p0.groovy @@ -135,7 +135,16 @@ suite("view_p0") { sql """CREATE VIEW IF NOT EXISTS `test_view_abc`(`a`) AS WITH T1 AS (SELECT 1 AS 'a'), T2 AS (SELECT 2 AS 'a') SELECT T1.a FROM T1 UNION ALL SELECT T2.a FROM T2;""" - sql "drop view if exists test_view_abc;" + sql "drop view if exists test_view_abc;" + + sql "drop view if exists test_view_aes;" + sql """CREATE VIEW IF NOT EXISTS `test_view_aes` + AS + SELECT aes_decrypt(from_base64("EXp7k7M9Zv1mIwPpno28Hg=="), '17IMZrGdwWf2Piy8', 'II2HLtihr5TQpQgR', 'AES_128_CBC'); + """ + qt_select_aes "SELECT * FROM test_view_aes;" + qt_show_aes "SHOW CREATE VIEW test_view_aes;" + sql "drop view if exists test_view_aes;" sql """DROP TABLE IF EXISTS test_view_table2""" diff --git a/regression-test/suites/workload_manager_p0/test_workload_sched_policy.groovy b/regression-test/suites/workload_manager_p0/test_workload_sched_policy.groovy index 1f772bc04ff1e5..6f80ca3490a608 100644 --- a/regression-test/suites/workload_manager_p0/test_workload_sched_policy.groovy +++ b/regression-test/suites/workload_manager_p0/test_workload_sched_policy.groovy @@ -17,8 +17,6 @@ suite("test_workload_sched_policy") { - sql "set experimental_enable_nereids_planner = false;" - sql "drop workload policy if exists test_cancel_policy;" sql "drop workload policy if exists set_action_policy;" sql "drop workload policy if exists fe_policy;" diff --git a/tools/fdb/fdb_ctl.sh b/tools/fdb/fdb_ctl.sh index 9c809abd5d4a50..09aaaaf3f2a0d8 100755 --- a/tools/fdb/fdb_ctl.sh +++ b/tools/fdb/fdb_ctl.sh @@ -77,7 +77,7 @@ function ensure_port_is_listenable() { function download_fdb() { if [[ -d "${FDB_PKG_DIR}" ]]; then - echo "FDB ${FDB_VERSION} already exists" + echo "FDB package for ${FDB_VERSION} already exists" return fi @@ -135,37 +135,94 @@ get_fdb_mode() { # Function to calculate number of processes calculate_process_numbers() { - # local memory_gb=$1 - local cpu_cores=$2 + local memory_limit_gb=$1 + local cpu_cores_limit=$2 - local min_processes=1 local data_dir_count # Convert comma-separated DATA_DIRS into an array IFS=',' read -r -a DATA_DIR_ARRAY <<<"${DATA_DIRS}" data_dir_count=${#DATA_DIR_ARRAY[@]} - # Stateless processes (at least 1, up to 1/4 of CPU cores) - local stateless_processes=$((cpu_cores / 4)) - [[ ${stateless_processes} -lt ${min_processes} ]] && stateless_processes=${min_processes} + # Parse the ratio input + IFS=':' read -r num_storage num_stateless num_log <<<"${STORAGE_STATELESS_LOG_RATIO}" - # Storage processes (must be a multiple of the number of data directories) - local storage_processes=$((cpu_cores / 4)) - [[ 
${storage_processes} -lt ${data_dir_count} ]] && storage_processes=${data_dir_count} - storage_processes=$(((storage_processes / data_dir_count) * data_dir_count)) + # Initialize process counts + local storage_processes=0 # Storage processes + local stateless_processes=0 # Stateless processes + local log_processes=0 # Log processes - # Transaction processes (must be a multiple of the number of data directories) - local transaction_processes=$((cpu_cores / 8)) - [[ ${transaction_processes} -lt ${min_processes} ]] && transaction_processes=${min_processes} - [[ ${transaction_processes} -lt ${data_dir_count} ]] && transaction_processes=${data_dir_count} - transaction_processes=$(((transaction_processes / data_dir_count) * data_dir_count)) + local storage_process_num_limit=$((STORAGE_PROCESSES_NUM_PER_SSD * data_dir_count)) + local log_process_num_limit=$((LOG_PROCESSES_NUM_PER_SSD * data_dir_count)) + + if [[ "#${MEDIUM_TYPE}" = "#HDD" ]]; then + storage_process_num_limit=$((STORAGE_PROCESSES_NUM_PER_HDD * data_dir_count)) + log_process_num_limit=$((LOG_PROCESSES_NUM_PER_HDD * data_dir_count)) + fi + + # Find maximum number of processes while maintaining the specified ratio + while true; do + # Calculate process counts based on the ratio + storage_processes=$((storage_processes + num_storage)) + stateless_processes=$((storage_processes * num_stateless / num_storage)) + log_processes=$((storage_processes * num_log / num_storage)) + + # Calculate total CPUs used + local total_cpu_used=$((storage_processes + stateless_processes + log_processes)) + + # Check memory constraint + local total_memory_used=$(((MEMORY_STORAGE_GB * storage_processes) + (MEMORY_STATELESS_GB * stateless_processes) + (MEMORY_LOG_GB * log_processes))) + + # Check datadir limits + if ((storage_processes > storage_process_num_limit || log_processes > log_process_num_limit)); then + break + fi + + # Check overall constraints + if ((total_memory_used <= memory_limit_gb && total_cpu_used <= cpu_cores_limit)); then + continue + else + # If constraints are violated, revert back + storage_processes=$((storage_processes - num_storage)) + stateless_processes=$((storage_processes * num_stateless / num_storage)) + log_processes=$((storage_processes * num_log / num_storage)) + break + fi + done # Return the values - echo "${stateless_processes} ${storage_processes} ${transaction_processes}" + echo "${stateless_processes} ${storage_processes} ${log_processes}" +} + +function check_vars() { + IFS=',' read -r -a IPS <<<"${FDB_CLUSTER_IPS}" + + command -v ping || echo "ping is not available to check machines are available, please install ping." 
+ + for IP_ADDRESS in "${IPS[@]}"; do + if ping -c 1 "${IP_ADDRESS}" &>/dev/null; then + echo "${IP_ADDRESS} is reachable" + else + echo "${IP_ADDRESS} is not reachable" + exit 1 + fi + done + + if [[ ${CPU_CORES_LIMIT} -gt $(nproc) ]]; then + echo "CPU_CORES_LIMIT beyonds number of machine, which is $(nproc)" + exit 1 + fi + + if [[ ${MEMORY_LIMIT_GB} -gt $(free -g | awk '/^Mem:/{print $2}') ]]; then + echo "MEMORY_LIMIT_GB beyonds memory of machine, which is $(free -g | awk '/^Mem:/{print $2}')" + exit 1 + fi } function deploy_fdb() { + check_vars download_fdb + check_fdb_running ln -sf "${FDB_PKG_DIR}/fdbserver" "${FDB_HOME}/fdbserver" ln -sf "${FDB_PKG_DIR}/fdbmonitor" "${FDB_HOME}/fdbmonitor" @@ -178,6 +235,10 @@ function deploy_fdb() { IFS=',' read -r -a DATA_DIR_ARRAY <<<"${DATA_DIRS}" for DIR in "${DATA_DIR_ARRAY[@]}"; do mkdir -p "${DIR}" || handle_error "Failed to create data directory ${DIR}" + if [[ -n "$(ls -A "${DIR}")" ]]; then + echo "Error: ${DIR} is not empty. DO NOT run deploy on a node running fdb. If you are sure that the node is not in a fdb cluster, run fdb_ctl.sh clean." + exit 1 + fi done echo -e "\tCreate fdb.cluster, coordinator: $(get_coordinators)" @@ -210,7 +271,14 @@ EOF CPU_CORES_LIMIT=${CPU_CORES_LIMIT:-1} # Calculate number of processes based on resources and data directories - read -r stateless_processes storage_processes transaction_processes <<<"$(calculate_process_numbers "${MEMORY_LIMIT_GB}" "${CPU_CORES_LIMIT}")" + read -r stateless_processes storage_processes log_processes <<<"$(calculate_process_numbers "${MEMORY_LIMIT_GB}" "${CPU_CORES_LIMIT}")" + echo "stateless process num : ${stateless_processes}, storage_processes : ${storage_processes}, log_processes : ${log_processes}" + if [[ ${storage_processes} -eq 0 ]]; then + # Add one process + PORT=$((FDB_PORT)) + echo "[fdbserver.${PORT}] +" >>"${FDB_HOME}/conf/fdb.conf" + fi # Add stateless processes for ((i = 0; i < stateless_processes; i++)); do @@ -233,12 +301,12 @@ datadir = ${DATA_DIR_ARRAY[${DIR_INDEX}]}/${PORT}" | tee -a "${FDB_HOME}/conf/fd FDB_PORT=$((FDB_PORT + storage_processes)) - # Add transaction processes - for ((i = 0; i < transaction_processes; i++)); do + # Add log processes + for ((i = 0; i < log_processes; i++)); do PORT=$((FDB_PORT + i)) DIR_INDEX=$((i % STORAGE_DIR_COUNT)) echo "[fdbserver.${PORT}] -class = transaction +class = log datadir = ${DATA_DIR_ARRAY[${DIR_INDEX}]}/${PORT}" | tee -a "${FDB_HOME}/conf/fdb.conf" >/dev/null done @@ -250,6 +318,8 @@ logdir = ${LOG_DIR}" >>"${FDB_HOME}/conf/fdb.conf" } function start_fdb() { + check_fdb_running + if [[ ! -f "${FDB_HOME}/fdbmonitor" ]]; then echo 'Please run setup before start fdb server' exit 1 @@ -275,6 +345,18 @@ function stop_fdb() { fi } +function check_fdb_running() { + if [[ -f "${FDB_HOME}/fdbmonitor.pid" ]]; then + local fdb_pid + + fdb_pid=$(cat "${FDB_HOME}/fdbmonitor.pid") + if ps -p "${fdb_pid}" >/dev/null; then + echo "fdbmonitor with pid ${fdb_pid} is running, stop it first." + exit 1 + fi + fi +} + function clean_fdb() { if [[ -f "${FDB_HOME}/fdbmonitor.pid" ]]; then local fdb_pid @@ -307,8 +389,6 @@ function clean_fdb() { function deploy() { local job="$1" - local skip_pkg="$2" - local skip_config="$3" if [[ ${job} =~ ^(all|fdb)$ ]]; then deploy_fdb @@ -324,16 +404,21 @@ function start() { fi if [[ ${init} =~ ^(all|fdb)$ ]]; then - echo "Try create database ..." 
local fdb_mode

fdb_mode=$(get_fdb_mode)
+
+ echo "Try to create database in fdb with ${fdb_mode} mode"
+
"${FDB_HOME}/fdbcli" -C "${FDB_HOME}/conf/fdb.cluster" \
- --exec "configure new ${fdb_mode} ssd" || true
+ --exec "configure new ${fdb_mode} ssd" ||
+ "${FDB_HOME}/fdbcli" -C "${FDB_HOME}/conf/fdb.cluster" --exec "status" ||
+ { echo "failed to start fdb, please check that all nodes have the same FDB_CLUSTER_ID" &&
+ exit 1; }
fi

- echo "Start fdb success, and the cluster is:"
- cat "${FDB_HOME}/conf/fdb.cluster"
+ echo "Started fdb successfully, now you can set conf for MetaService:"
+ echo "fdb_cluster = $(cat "${FDB_HOME}"/conf/fdb.cluster)"
}

function stop() {
@@ -359,16 +444,12 @@ function status() {
}

function usage() {
- echo "Usage: $0 [--skip-pkg] [--skip-config]"
+ echo "Usage: $0 <cmd>"
echo -e "\t deploy \t setup fdb env (dir, binary, conf ...)"
echo -e "\t clean \t clean fdb data"
echo -e "\t start \t start fdb"
echo -e "\t stop \t stop fdb"
- echo -e ""
- echo -e ""
- echo -e "Args:"
- echo -e "\t --skip-pkg \t skip to update binary pkgs during deploy"
- echo -e "\t --skip-config \t skip to update config during deploy"
+ echo -e "\t fdbcli \t run fdbcli on the cluster"
echo -e ""
exit 1
}
@@ -390,12 +471,10 @@ shift

job="fdb"
init="fdb"
-skip_pkg="false"
-skip_config="false"

case ${cmd} in
deploy)
- deploy "${job}" "${skip_pkg}" "${skip_config}"
+ deploy "${job}"
;;
start)
start "${job}" "${init}"
diff --git a/tools/fdb/fdb_vars.sh b/tools/fdb/fdb_vars.sh
index c0bbadabdd6cd1..0d4cc1667bc116 100644
--- a/tools/fdb/fdb_vars.sh
+++ b/tools/fdb/fdb_vars.sh
@@ -25,13 +25,15 @@
# shellcheck disable=2034
DATA_DIRS="/mnt/foundationdb/data1,/mnt/foundationdb/data2,/mnt/foundationdb/data3"

+MEDIUM_TYPE="SSD" # medium type of the data dirs, SSD or HDD
+
# Define the cluster IPs (comma-separated list of IP addresses)
# You should have at least 3 IP addresses for a production cluster
# The first IP addresses will be used as the coordinator,
# num of coordinators depends on the number of nodes, see the function get_coordinators.
# For high availability, machines should be in diffrent rack.
# shellcheck disable=2034
-FDB_CLUSTER_IPS="172.200.0.2,172.200.0.3,172.200.0.4"
+FDB_CLUSTER_IPS="172.200.0.5,172.200.0.6,172.200.0.7"

# Define the FoundationDB home directory, which contains the fdb binaries and logs.
# default is /fdbhome and have to be absolute path.
@@ -41,23 +43,23 @@ FDB_HOME="/fdbhome"

# Define the cluster id, shoule be generated random like mktemp -u XXXXXXXX,
# have to be different for each cluster.
# shellcheck disable=2034
-FDB_CLUSTER_ID=$(mktemp -u XXXXXXXX)
+FDB_CLUSTER_ID="ra7eOp7x"

# Define the cluster description, you 'd better to change it.
# shellcheck disable=2034
FDB_CLUSTER_DESC="mycluster"

-#======================= OPTIONAL CUSTOMIZATION ============================
# Define resource limits
# Memory limit in gigabytes
# shellcheck disable=2034
-MEMORY_LIMIT_GB=16
+MEMORY_LIMIT_GB=64

# CPU cores limit
# shellcheck disable=2034
-CPU_CORES_LIMIT=8
+CPU_CORES_LIMIT=16
+
+#======================= OPTIONAL CUSTOMIZATION ============================

-#===========================================================================
# Define starting port for the servers
# This is the base port number for the fdbserver processes, usually does not need to be changed
# shellcheck disable=2034
@@ -70,3 +72,24 @@ FDB_VERSION="7.1.38"

# Users who run the fdb processes, default is the current user
# shellcheck disable=2034
USER=$(whoami)
+
+# Ratio of storage, stateless and log process numbers in fdb
+# shellcheck disable=2034
+STORAGE_STATELESS_LOG_RATIO="2:1:1"
+
+# Per-data-dir process count limits, depending on the medium type
+# shellcheck disable=2034
+STORAGE_PROCESSES_NUM_PER_HDD=1
+# shellcheck disable=2034
+LOG_PROCESSES_NUM_PER_HDD=1
+# shellcheck disable=2034
+STORAGE_PROCESSES_NUM_PER_SSD=4
+# shellcheck disable=2034
+LOG_PROCESSES_NUM_PER_SSD=4
+
+# shellcheck disable=2034
+MEMORY_STORAGE_GB=8 # estimated memory (GB) per storage process
+# shellcheck disable=2034
+MEMORY_STATELESS_GB=1 # estimated memory (GB) per stateless process
+# shellcheck disable=2034
+MEMORY_LOG_GB=2 # estimated memory (GB) per log process
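
For reference, here is a minimal standalone sketch of how the allocation loop in calculate_process_numbers resolves with the defaults shipped in fdb_vars.sh above (SSD medium, three data dirs, a 2:1:1 STORAGE_STATELESS_LOG_RATIO, 64 GB memory, 16 cores). It assumes num_storage, num_stateless and num_log are the parsed fields of the ratio; the parsing itself happens earlier in fdb_ctl.sh and is not part of this excerpt.

#!/usr/bin/env bash
# Standalone sketch of the allocation loop, using the default fdb_vars.sh values.
data_dir_count=3                      # DATA_DIRS has three directories
memory_limit_gb=64 cpu_cores_limit=16 # MEMORY_LIMIT_GB / CPU_CORES_LIMIT
num_storage=2 num_stateless=1 num_log=1
MEMORY_STORAGE_GB=8 MEMORY_STATELESS_GB=1 MEMORY_LOG_GB=2
storage_process_num_limit=$((4 * data_dir_count)) # STORAGE_PROCESSES_NUM_PER_SSD=4
log_process_num_limit=$((4 * data_dir_count))     # LOG_PROCESSES_NUM_PER_SSD=4

storage_processes=0 stateless_processes=0 log_processes=0
while true; do
    # Grow by one ratio step and derive the other counts from it
    storage_processes=$((storage_processes + num_storage))
    stateless_processes=$((storage_processes * num_stateless / num_storage))
    log_processes=$((storage_processes * num_log / num_storage))
    total_cpu_used=$((storage_processes + stateless_processes + log_processes))
    total_memory_used=$((MEMORY_STORAGE_GB * storage_processes + MEMORY_STATELESS_GB * stateless_processes + MEMORY_LOG_GB * log_processes))
    if ((storage_processes > storage_process_num_limit || log_processes > log_process_num_limit)); then
        break
    fi
    if ((total_memory_used <= memory_limit_gb && total_cpu_used <= cpu_cores_limit)); then
        continue
    fi
    # Constraint violated: step back to the last valid counts
    storage_processes=$((storage_processes - num_storage))
    stateless_processes=$((storage_processes * num_stateless / num_storage))
    log_processes=$((storage_processes * num_log / num_storage))
    break
done
echo "${stateless_processes} ${storage_processes} ${log_processes}"
# Prints "3 6 3": 6 storage + 3 stateless + 3 log processes use 12 cores and
# 8*6 + 1*3 + 2*3 = 57 GB, while the next step (8/4/4) would need 76 GB > 64 GB.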
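
A typical rollout with the reworked script, following the usage() text above, would then look roughly like this; run it on every node in FDB_CLUSTER_IPS after editing fdb_vars.sh with the same FDB_CLUSTER_ID. The command names come from the script itself, while the trailing comments are only a summary of what the patch does.

./fdb_ctl.sh deploy   # check vars, create data dirs, link binaries, write fdb.cluster/fdb.conf
./fdb_ctl.sh start    # start fdbmonitor and try "configure new <mode> ssd" on a fresh cluster;
                      # prints "fdb_cluster = ..." for the MetaService conf
./fdb_ctl.sh fdbcli   # open fdbcli against the cluster, e.g. to run "status"
./fdb_ctl.sh clean    # wipe fdb data on this node, only when tearing the cluster down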