
file_engine/query.rs

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod file_stream;

use std::collections::HashSet;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use common_datasource::object_store::build_backend;
use common_recordbatch::adapter::RecordBatchMetrics;
use common_recordbatch::error::{self as recordbatch_error, Result as RecordBatchResult};
use common_recordbatch::{
    DfSendableRecordBatchStream, OrderOption, RecordBatch, RecordBatchStream,
    SendableRecordBatchStream,
};
use datafusion::logical_expr::utils as df_logical_expr_utils;
use datafusion_expr::expr::Expr;
use datatypes::arrow::compute as arrow_compute;
use datatypes::data_type::DataType;
use datatypes::schema::{Schema, SchemaRef};
use datatypes::vectors::Helper;
use futures::Stream;
use snafu::{GenerateImplicitData, ResultExt, ensure};
use store_api::storage::ScanRequest;

use self::file_stream::ScanPlanConfig;
use crate::error::{BuildBackendSnafu, ProjectSchemaSnafu, ProjectionOutOfBoundsSnafu, Result};
use crate::region::FileRegion;

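// Overview of the scan path in this module:
//
// 1. `FileRegion::query` builds an object-store backend for the region's URL, narrows the
//    requested projection and filters down to what the underlying file actually contains,
//    and creates a stream over the file via `file_stream::create_stream`.
// 2. `FileToScanRegionStream` then adapts each file record batch to the region's scan
//    schema: columns present in the file are cast to the expected arrow type when needed,
//    and columns missing from the file are filled with their default values.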
impl FileRegion {
    /// Scans the file region, pushing the projection and compatible filters down to the
    /// underlying file and adapting the resulting batches to the region's scan schema.
    pub fn query(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let store = build_backend(&self.url, &self.options).context(BuildBackendSnafu)?;

        let file_projection = self.projection_pushdown_to_file(&request.projection)?;
        let file_filters = self.filters_pushdown_to_file(&request.filters)?;
        let file_schema = Arc::new(Schema::new(self.file_options.file_column_schemas.clone()));

        let projected_file_schema = if let Some(projection) = &file_projection {
            Arc::new(
                file_schema
                    .try_project(projection)
                    .context(ProjectSchemaSnafu)?,
            )
        } else {
            file_schema.clone()
        };

        let file_stream = file_stream::create_stream(
            &self.format,
            &ScanPlanConfig {
                file_schema,
                files: &self.file_options.files,
                projection: file_projection.as_ref(),
                filters: &file_filters,
                limit: request.limit,
                store,
            },
        )?;

        let scan_schema = self.scan_schema(&request.projection)?;

        Ok(Box::pin(FileToScanRegionStream::new(
            scan_schema,
            projected_file_schema,
            file_stream,
        )))
    }

    /// Maps the requested projection (indices into the region schema) to indices into the
    /// file's column schemas. Requested columns that don't exist in the file are dropped
    /// here; `FileToScanRegionStream` later fills them with default values.
    fn projection_pushdown_to_file(
        &self,
        req_projection: &Option<Vec<usize>>,
    ) -> Result<Option<Vec<usize>>> {
        let Some(scan_projection) = req_projection.as_ref() else {
            return Ok(None);
        };

        let file_column_schemas = &self.file_options.file_column_schemas;
        let mut file_projection = Vec::with_capacity(scan_projection.len());
        for column_index in scan_projection {
            ensure!(
                *column_index < self.metadata.schema.num_columns(),
                ProjectionOutOfBoundsSnafu {
                    column_index: *column_index,
                    bounds: self.metadata.schema.num_columns()
                }
            );

            let column_name = self.metadata.schema.column_name_by_index(*column_index);
            let file_column_index = file_column_schemas
                .iter()
                .position(|c| c.name == column_name);
            if let Some(file_column_index) = file_column_index {
                file_projection.push(file_column_index);
            }
        }
        Ok(Some(file_projection))
    }

    /// Collects the filters that can be pushed down to the file: only filters whose `Expr`
    /// references columns that all exist in the file are kept.
    fn filters_pushdown_to_file(&self, scan_filters: &[Expr]) -> Result<Vec<Expr>> {
        let mut file_filters = Vec::with_capacity(scan_filters.len());

        let file_column_names = self
            .file_options
            .file_column_schemas
            .iter()
            .map(|c| &c.name)
            .collect::<HashSet<_>>();

        // Reused across iterations to avoid allocating a new set per filter.
        let mut aux_column_set = HashSet::new();
        for scan_filter in scan_filters {
            df_logical_expr_utils::expr_to_columns(scan_filter, &mut aux_column_set)?;

            let all_file_columns = aux_column_set
                .iter()
                .all(|column_in_expr| file_column_names.contains(&column_in_expr.name));
            if all_file_columns {
                file_filters.push(scan_filter.clone());
            }
            aux_column_set.clear();
        }
        Ok(file_filters)
    }

    /// Returns the schema of the batches produced by the scan: the region schema, projected
    /// by the request's projection when one is given.
    fn scan_schema(&self, req_projection: &Option<Vec<usize>>) -> Result<SchemaRef> {
        let schema = if let Some(indices) = req_projection {
            Arc::new(
                self.metadata
                    .schema
                    .try_project(indices)
                    .context(ProjectSchemaSnafu)?,
            )
        } else {
            self.metadata.schema.clone()
        };

        Ok(schema)
    }
}

/// Adapts record batches read from the file to the region's scan schema.
struct FileToScanRegionStream {
    scan_schema: SchemaRef,
    file_stream: DfSendableRecordBatchStream,
    /// Maps columns in `scan_schema` to their index in the projected file schema.
    /// `None` means the column doesn't exist in the file and should be filled with default values.
    scan_to_file_projection: Vec<Option<usize>>,
}

impl RecordBatchStream for FileToScanRegionStream {
    fn schema(&self) -> SchemaRef {
        self.scan_schema.clone()
    }

    fn output_ordering(&self) -> Option<&[OrderOption]> {
        None
    }

    fn metrics(&self) -> Option<RecordBatchMetrics> {
        None
    }
}
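// For every batch from the file stream, the `Stream` impl below rebuilds the columns in scan
// schema order: a column backed by the file is cast to the scan schema's arrow type if the
// types differ, while a column absent from the file is materialized from its default value
// (an error is returned if it has none).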

impl Stream for FileToScanRegionStream {
    type Item = RecordBatchResult<RecordBatch>;

    fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.file_stream).poll_next(ctx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Some(Ok(file_record_batch))) => {
                let num_rows = file_record_batch.num_rows();
                let mut columns = Vec::with_capacity(self.scan_schema.num_columns());

                for (idx, column_schema) in self.scan_schema.column_schemas().iter().enumerate() {
                    if let Some(file_idx) = self.scan_to_file_projection[idx] {
                        // The column exists in the file: cast it to the expected arrow type
                        // if necessary, then convert it into a vector.
                        let expected_arrow_type = column_schema.data_type.as_arrow_type();
                        let mut array = file_record_batch.column(file_idx).clone();

                        if array.data_type() != &expected_arrow_type {
                            array = arrow_compute::cast(array.as_ref(), &expected_arrow_type)
                                .context(recordbatch_error::ArrowComputeSnafu)?;
                        }

                        let vector = Helper::try_into_vector(array)
                            .context(recordbatch_error::DataTypesSnafu)?;
                        columns.push(vector);
                    } else {
                        // The column is missing from the file: fill it with its default value.
                        let vector = column_schema
                            .create_default_vector(num_rows)
                            .context(recordbatch_error::DataTypesSnafu)?
                            .ok_or_else(|| {
                                recordbatch_error::CreateRecordBatchesSnafu {
                                    reason: format!(
                                        "column {} is missing from file source and has no default",
                                        column_schema.name
                                    ),
                                }
                                .build()
                            })?;
                        columns.push(vector);
                    }
                }

                let record_batch = RecordBatch::new(self.scan_schema.clone(), columns)?;

                Poll::Ready(Some(Ok(record_batch)))
            }
            Poll::Ready(Some(Err(error))) => {
                Poll::Ready(Some(Err(recordbatch_error::Error::PollStream {
                    error,
                    location: snafu::Location::generate(),
                })))
            }
            Poll::Ready(None) => Poll::Ready(None),
        }
    }
}

impl FileToScanRegionStream {
    fn new(
        scan_schema: SchemaRef,
        file_schema: SchemaRef,
        file_stream: DfSendableRecordBatchStream,
    ) -> Self {
        // Precompute, for each scan column, its index in the projected file schema
        // (`None` when the file doesn't contain the column).
        let scan_to_file_projection = scan_schema
            .column_schemas()
            .iter()
            .map(|column| file_schema.column_index_by_name(&column.name))
            .collect();

        Self {
            scan_schema,
            file_stream,
            scan_to_file_projection,
        }
    }
}