1
0
Fork 0
mirror of https://gitlab.com/bramw/baserow.git synced 2025-03-16 21:43:34 +00:00

Footer calculation v0.4 - improve performance

This commit is contained in:
Jrmi 2022-04-19 07:03:00 +00:00
parent 0fb56c6ecd
commit 9545c817ac
23 changed files with 1596 additions and 220 deletions
backend
changelog.md
web-frontend
modules/database
components/view/grid
services/view
store/view
viewAggregationTypes.js
test
fixtures
unit/database
components/view/grid
store/view

View file

@ -36,3 +36,39 @@ field_aggregation_response_schema = build_object_type(
},
required=["value"],
)
# OpenAPI schema for the "all aggregations of a grid view" endpoint response:
# one `field_{id}` entry per aggregated field (the value type depends on the
# aggregation type, hence the `anyOf`), plus an optional `total` row count
# that is only present when `include=total` is passed.
field_aggregations_response_schema = build_object_type(
    {
        "field_{id}": {
            "anyOf": [
                {
                    "type": "number",
                    "description": "The aggregation result for the field with id {id}.",
                    "example": 5,
                },
                {
                    "type": "string",
                    "description": "The aggregation result for the field with id {id}.",
                },
                {
                    "type": "array",
                    "items": {},
                    "description": "The aggregation result for the field with id {id}.",
                },
                {
                    "type": "object",
                    "description": "The aggregation result for the field with id {id}.",
                },
            ]
        },
        "total": {
            "type": "integer",
            "description": (
                "The total value count. Only returned if `include=total` "
                "is specified as GET parameter."
            ),
            "example": 7,
        },
    },
)

View file

@ -20,7 +20,6 @@ class GridViewFieldOptionsSerializer(serializers.ModelSerializer):
"aggregation_raw_type"
).help_text,
required=False,
default="",
allow_blank=True,
)

View file

@ -5,6 +5,7 @@ from .views import (
PublicGridViewInfoView,
PublicGridViewRowsView,
GridViewFieldAggregationView,
GridViewFieldAggregationsView,
)
app_name = "baserow.contrib.database.api.views.grid"
@ -15,6 +16,11 @@ urlpatterns = [
GridViewFieldAggregationView.as_view(),
name="field-aggregation",
),
re_path(
r"(?P<view_id>[0-9]+)/aggregations/$",
GridViewFieldAggregationsView.as_view(),
name="field-aggregations",
),
re_path(r"(?P<view_id>[0-9]+)/$", GridViewView.as_view(), name="list"),
re_path(
r"(?P<slug>[-\w]+)/public/info/$",

View file

@ -69,7 +69,10 @@ from baserow.contrib.database.fields.exceptions import (
from baserow.core.exceptions import UserNotInGroup
from .errors import ERROR_GRID_DOES_NOT_EXIST
from .serializers import GridViewFilterSerializer
from .schemas import field_aggregation_response_schema
from .schemas import (
field_aggregation_response_schema,
field_aggregations_response_schema,
)
def get_available_aggregation_type():
@ -343,6 +346,98 @@ class GridViewView(APIView):
return Response(serializer.data)
class GridViewFieldAggregationsView(APIView):
    """
    Returns, in one request, every field aggregation configured for a grid
    view, optionally restricted by a search term and optionally including the
    total row count.
    """

    permission_classes = (IsAuthenticated,)

    def get_permissions(self):
        # GET requests are open to anyone; the actual group-membership check
        # happens in `get()` below (`allow_if_template=True` permits template
        # views). Other methods keep the default `IsAuthenticated`.
        if self.request.method == "GET":
            return [AllowAny()]

        return super().get_permissions()

    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="view_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Select the view you want the aggregations for.",
            ),
            OpenApiParameter(
                name="search",
                location=OpenApiParameter.QUERY,
                type=OpenApiTypes.STR,
                description=(
                    "If provided the aggregations are calculated only for matching "
                    "rows."
                ),
            ),
            OpenApiParameter(
                name="include",
                location=OpenApiParameter.QUERY,
                type=OpenApiTypes.STR,
                description=(
                    "if `include` is set to `total`, the total row count will be "
                    "returned with the result."
                ),
            ),
        ],
        tags=["Database table grid view"],
        operation_id="get_database_table_grid_view_field_aggregations",
        description=(
            "Returns all field aggregations values previously defined for this grid "
            "view. If filters exist for this view, the aggregations are computed only "
            # Fix: a space was missing here, rendering as "rows.You need".
            "on filtered rows. "
            "You need to have read permissions on the view to request aggregations."
        ),
        responses={
            200: field_aggregations_response_schema,
            400: get_error_schema(
                [
                    "ERROR_USER_NOT_IN_GROUP",
                ]
            ),
            404: get_error_schema(
                [
                    "ERROR_GRID_DOES_NOT_EXIST",
                ]
            ),
        },
    )
    @map_exceptions(
        {
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
            ViewDoesNotExist: ERROR_GRID_DOES_NOT_EXIST,
        }
    )
    @allowed_includes("total")
    def get(self, request, view_id, total):
        """
        Returns the aggregation values for the specified view considering the
        filters and the search term defined for this grid view.
        Also returns the total count to be able to compute percentages on the
        client side if asked.

        :param view_id: The id of the grid view (from the URL path).
        :param total: True when `include=total` was requested (injected by the
            `allowed_includes` decorator).
        """

        search = request.GET.get("search")

        view_handler = ViewHandler()
        view = view_handler.get_view(view_id, GridView)

        # Check permission
        view.table.database.group.has_user(
            request.user, raise_error=True, allow_if_template=True
        )

        # Compute aggregation
        # Note: we can't optimize the model by restricting it to just the
        # aggregated fields because other fields may be needed for filtering.
        result = view_handler.get_view_field_aggregations(
            view, with_total=total, search=search
        )

        return Response(result)
class GridViewFieldAggregationView(APIView):
permission_classes = (IsAuthenticated,)
@ -388,12 +483,11 @@ class GridViewFieldAggregationView(APIView):
tags=["Database table grid view"],
operation_id="get_database_table_grid_view_field_aggregation",
description=(
"Computes an aggregation of all values for a specific field from the selected "
"grid view. You can select the aggregation type by specifying "
"Computes the aggregation of all the values for a specified field from the "
"selected grid view. You must select the aggregation type by setting "
"the `type` GET parameter. If filters are configured for the selected "
"view, the aggregation is calculated only on filtered rows. "
"The total count of rows is also always returned with the result."
"You need to have read permissions on the view to request aggregations."
"You need to have read permissions on the view to request an aggregation."
),
responses={
200: field_aggregation_response_schema,
@ -426,7 +520,8 @@ class GridViewFieldAggregationView(APIView):
"""
Returns the aggregation value for the specified view/field considering
the filters configured for this grid view.
Also return the total count to be able to make percentage on client side.
Also returns the total count to be able to make percentage on client side if
asked.
"""
view_handler = ViewHandler()
@ -448,7 +543,7 @@ class GridViewFieldAggregationView(APIView):
)
result = {
"value": aggregations[f"field_{field_instance.id}__{aggregation_type}"],
"value": aggregations[field_instance.db_column],
}
if total:

View file

@ -435,6 +435,9 @@ class FieldHandler:
)
updated_fields = update_collector.apply_updates_and_get_updated_fields()
ViewHandler().field_updated(field)
field_updated.send(
self,
field=field,
@ -762,6 +765,8 @@ class FieldHandler:
)
if apply_and_send_updates:
updated_fields = update_collector.apply_updates_and_get_updated_fields()
ViewHandler().field_updated(updated_fields)
field_restored.send(
self, field=field, user=None, related_fields=updated_fields
)

View file

@ -919,6 +919,10 @@ class FieldType(
dependant_path_to_starting_table,
)
from baserow.contrib.database.views.handler import ViewHandler
ViewHandler().field_value_updated(field)
def row_of_dependency_deleted(
self,
field,
@ -1053,6 +1057,10 @@ class FieldType(
update_collector,
)
from baserow.contrib.database.views.handler import ViewHandler
ViewHandler().field_updated(field)
def field_dependency_deleted(
self, field, deleted_field, via_path_to_starting_table, update_collector
):

View file

@ -307,6 +307,10 @@ class RowHandler:
)
update_collector.apply_updates_and_get_updated_fields()
from baserow.contrib.database.views.handler import ViewHandler
ViewHandler().field_value_updated(updated_fields)
return instance
# noinspection PyMethodMayBeStatic
@ -425,6 +429,10 @@ class RowHandler:
# query for the rows updated values instead.
row.refresh_from_db(fields=model.fields_requiring_refresh_after_update())
from baserow.contrib.database.views.handler import ViewHandler
ViewHandler().field_value_updated(updated_fields)
row_updated.send(
self,
row=row,
@ -492,6 +500,10 @@ class RowHandler:
)
update_collector.apply_updates_and_get_updated_fields()
from baserow.contrib.database.views.handler import ViewHandler
ViewHandler().field_value_updated(updated_fields)
row_updated.send(
self,
row=row,
@ -552,6 +564,10 @@ class RowHandler:
)
update_collector.apply_updates_and_get_updated_fields()
from baserow.contrib.database.views.handler import ViewHandler
ViewHandler().field_value_updated(updated_fields)
row_deleted.send(
self,
row_id=row_id,

View file

@ -13,6 +13,7 @@ from baserow.contrib.database.fields.registries import field_type_registry
from baserow.contrib.database.rows.signals import row_created
from baserow.contrib.database.table.models import Table, GeneratedTableModel
from baserow.contrib.database.table.signals import table_created
from baserow.contrib.database.views.handler import ViewHandler
from baserow.core.exceptions import TrashItemDoesNotExist
from baserow.core.models import TrashEntry
from baserow.core.trash.exceptions import RelatedTableTrashedException
@ -238,6 +239,9 @@ class RowTrashableItemType(TrashableItemType):
path_to_starting_table,
)
update_collector.apply_updates_and_get_updated_fields()
ViewHandler().field_value_updated(updated_fields)
row_created.send(
self,
row=trashed_item,

View file

@ -1,8 +1,10 @@
from collections import defaultdict
from copy import deepcopy
from typing import Dict, Any, List, Optional, Iterable, Tuple
from typing import Dict, Any, List, Optional, Iterable, Tuple, Union
from redis.exceptions import LockNotOwnedError
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.core.cache import cache
from django.db import models as django_models
from django.db.models import F, Count
@ -167,6 +169,9 @@ class ViewHandler:
view = set_allowed_attrs(view_values, allowed_fields, view)
view.save()
if "filters_disabled" in view_values:
view_type.after_filter_update(view)
view_updated.send(self, view=view, user=user)
return view
@ -313,6 +318,41 @@ class ViewHandler:
for view_type in view_type_registry.get_all():
view_type.after_field_type_change(field)
def field_value_updated(self, updated_fields: Union[Iterable[Field], Field]):
    """
    Called after a field value has been modified because of a row creation,
    modification or deletion. This method is called for each directly or
    indirectly affected list of fields.

    Calls the `.after_field_value_update(updated_fields)` of each view type.

    :param updated_fields: The field or list of fields that are affected.
    """

    # Normalize a single field into a one-element list.
    field_list = (
        updated_fields if isinstance(updated_fields, list) else [updated_fields]
    )

    # Give every registered view type a chance to react.
    for registered_type in view_type_registry.get_all():
        registered_type.after_field_value_update(field_list)
def field_updated(self, updated_fields: Union[Iterable[Field], Field]):
    """
    Called for each field modification. This includes indirect modifications
    when fields depend on one another (like formula fields or lookup fields).

    Calls the `.after_field_update(updated_fields)` of each view type.

    :param updated_fields: The field or list of fields that are updated.
    """

    # Normalize a single field into a one-element list.
    field_list = (
        updated_fields if isinstance(updated_fields, list) else [updated_fields]
    )

    # Give every registered view type a chance to react.
    for registered_type in view_type_registry.get_all():
        registered_type.after_field_update(field_list)
def _get_filter_builder(
self, view: View, model: GeneratedTableModel
) -> FilterBuilder:
@ -380,7 +420,7 @@ class ViewHandler:
:param base_queryset: The base queryset from where to select the view filter
object. This can for example be used to do a `select_related`.
:type base_queryset: Queryset
:raises ViewFilterDoesNotExist: The the requested view does not exists.
:raises ViewFilterDoesNotExist: The requested view does not exists.
:return: The requested view filter instance.
:type: ViewFilter
"""
@ -462,6 +502,9 @@ class ViewHandler:
view=view, field=field, type=view_filter_type.type, value=value
)
# Call view type hooks
view_type.after_filter_update(view)
view_filter_created.send(self, view_filter=view_filter, user=user)
return view_filter
@ -513,6 +556,10 @@ class ViewHandler:
view_filter.type = type_name
view_filter.save()
# Call view type hooks
view_type = view_type_registry.get_by_model(view_filter.view.specific_class)
view_type.after_filter_update(view_filter.view)
view_filter_updated.send(self, view_filter=view_filter, user=user)
return view_filter
@ -533,6 +580,10 @@ class ViewHandler:
view_filter_id = view_filter.id
view_filter.delete()
# Call view type hooks
view_type = view_type_registry.get_by_model(view_filter.view.specific_class)
view_type.after_filter_update(view_filter.view)
view_filter_deleted.send(
self, view_filter_id=view_filter_id, view_filter=view_filter, user=user
)
@ -838,18 +889,221 @@ class ViewHandler:
queryset = queryset.search_all_fields(search, only_search_by_field_ids)
return queryset
def _get_aggregation_lock_cache_key(self, view: View):
"""
Returns the aggregation lock cache key for the specified view.
"""
return f"_aggregation__{view.pk}_lock"
def _get_aggregation_value_cache_key(self, view: View, name: str):
"""
Returns the aggregation value cache key for the specified view and name.
"""
return f"aggregation_value__{view.pk}_{name}"
def _get_aggregation_version_cache_key(self, view: View, name: str):
"""
Returns the aggregation version cache key for the specified view and name.
"""
return f"aggregation_version__{view.pk}_{name}"
def clear_full_aggregation_cache(self, view: View):
    """
    Invalidates every cached aggregation value of the specified view by
    bumping the version counter of each configured aggregation.

    :param view: The view whose aggregation cache should be cleared.
    """

    view_type = view_type_registry.get_by_model(view.specific_class)
    configured = view_type.get_aggregations(view)
    # Each aggregation is a (field, aggregation_type) tuple; the cache is
    # keyed by the field's db column name.
    self.clear_aggregation_cache(
        view, [field.db_column for (field, _) in configured]
    )
def clear_aggregation_cache(self, view: View, names: Union[List[str], str]):
    """
    Invalidates the cached aggregation values for the given field names of a
    view by incrementing their version counters in the cache.

    :param view: The view whose aggregations should be invalidated.
    :param names: A field (db column) name or a list of such names.
    """

    name_list = names if isinstance(names, list) else [names]

    for field_name in name_list:
        version_key = self._get_aggregation_version_cache_key(view, field_name)
        try:
            cache.incr(version_key, 1)
        except ValueError:
            # The counter doesn't exist yet; seed it past the implicit
            # initial version (1) so any cached value becomes stale.
            cache.set(version_key, 2)
def _get_aggregations_to_compute(
    self,
    view: View,
    aggregations: Iterable[Tuple[django_models.Field, str]],
    no_cache: bool = False,
) -> Tuple[Dict[str, Any], Dict[str, Tuple[django_models.Field, str, int]]]:
    """
    Figure out which aggregation needs to be computed and which one is cached.

    A cached value is considered valid when the version stored alongside the
    value matches the current version counter for that view/field.

    :param view: The view the aggregations belong to.
    :param aggregations: The (field, aggregation_type) pairs configured for
        the view.
    :param no_cache: When truthy, the cache is ignored entirely and every
        aggregation is reported as needing computation.

    Returns a tuple with:
      - a dict of field_name -> cached values for values that are in the cache
      - a dict of values that need to be computed. keys are field name and values
        are a tuple with:
         - The field instance which aggregation needs to be computed
         - The aggregation_type
         - The current version
    """

    if not no_cache:
        names = [agg[0].db_column for agg in aggregations]
        # Get value and version cache all at once (one round-trip instead of
        # two per aggregation).
        cached_keys = [
            self._get_aggregation_value_cache_key(view, name) for name in names
        ] + [self._get_aggregation_version_cache_key(view, name) for name in names]
        cached = cache.get_many(cached_keys)
    else:
        # We don't want to use cache for search query
        cached = {}

    valid_cached_values = {}
    need_computation = {}

    # Try to get field value from cache or add it to the need_computation list
    for (field_instance, aggregation_type_name) in aggregations:
        # Default version 0 on the value vs default version 1 on the counter
        # guarantees a miss when nothing is cached yet.
        cached_value = cached.get(
            self._get_aggregation_value_cache_key(view, field_instance.db_column),
            {"version": 0},
        )
        cached_version = cached.get(
            self._get_aggregation_version_cache_key(view, field_instance.db_column),
            1,
        )

        # If the value version and the current version are the same we don't
        # need to recompute the value.
        if cached_value["version"] == cached_version:
            valid_cached_values[field_instance.db_column] = cached_value["value"]
        else:
            need_computation[field_instance.db_column] = {
                "instance": field_instance,
                "aggregation_type": aggregation_type_name,
                "version": cached_version,
            }

    return (valid_cached_values, need_computation)
def get_view_field_aggregations(
    self,
    view: View,
    model: Union[GeneratedTableModel, None] = None,
    with_total: bool = False,
    search=None,
) -> Dict[str, Any]:
    """
    Returns a dict of aggregations for all aggregations configured for the
    view in parameters. Unless the search parameter is set to a non empty
    string, the aggregation values are cached when computed and must be
    invalidated when necessary.
    The dict keys are field names and values are aggregation values. The total
    is included in the result if `with_total` is specified.

    :param view: The view to get the field aggregations for.
    :param model: The model for this view table to generate the aggregation
        query from. If not specified the model will be generated
        automatically.
    :param with_total: Whether the total row count should be returned in the
        result.
    :param search: The search string to consider. If the search parameter is
        defined, the cache is bypassed and the aggregations are recomputed on
        the fly.
    :raises FieldAggregationNotSupported: When the view type doesn't support
        field aggregation.
    :returns: A dict of aggregation values.
    """

    view_type = view_type_registry.get_by_model(view.specific_class)

    # Check if view supports field aggregation
    if not view_type.can_aggregate_field:
        raise FieldAggregationNotSupported(
            f"Field aggregation is not supported for {view_type.type} views."
        )

    aggregations = view_type.get_aggregations(view)

    (
        values,
        need_computation,
    ) = self._get_aggregations_to_compute(view, aggregations, no_cache=search)

    use_lock = hasattr(cache, "lock")
    cache_lock = None

    if not search and use_lock and (need_computation or with_total):
        # Lock the cache to avoid many updates when many queries arrive at the
        # same time, which happens when multiple users are on the same view.
        # This lock is optional. It avoids duplicate processing but doesn't
        # break anything if it fails, so the timeout is low.
        cache_lock = cache.lock(
            self._get_aggregation_lock_cache_key(view), timeout=10
        )
        cache_lock.acquire()

        # We query the cache again here because it may have been updated by
        # another request while we were waiting for the lock.
        (values, need_computation) = self._get_aggregations_to_compute(
            view, aggregations, no_cache=search
        )

    try:
        # Do we need to compute some aggregations?
        if need_computation or with_total:
            db_result = self.get_field_aggregations(
                view,
                [
                    (n["instance"], n["aggregation_type"])
                    for n in need_computation.values()
                ],
                model,
                with_total=with_total,
                search=search,
            )

            if not search:
                to_cache = {}
                for key, value in db_result.items():
                    # We don't cache the total value
                    if key != "total":
                        to_cache[
                            self._get_aggregation_value_cache_key(view, key)
                        ] = {
                            "value": value,
                            "version": need_computation[key]["version"],
                        }

                # Let's cache the newly computed values
                cache.set_many(to_cache)

            # Merge cached values and the freshly computed ones
            values.update(db_result)
    finally:
        # Fix: previously the lock leaked (until its 10s timeout) when the
        # aggregation query raised. The `finally` guarantees release.
        if cache_lock is not None:
            try:
                cache_lock.release()
            except LockNotOwnedError:
                # If the lock release fails, it might be because of the
                # timeout and it's been stolen so we don't really care.
                pass

    return values
def get_field_aggregations(
self,
view: View,
aggregations: Iterable[Tuple[django_models.Field, str]],
model: Table = None,
model: Union[GeneratedTableModel, None] = None,
with_total: bool = False,
search: Union[str, None] = None,
) -> Dict[str, Any]:
"""
Returns a dict of aggregation for given (field, aggregation_type) couple.
Each dict key is the name of the field suffixed with two `_` then the
aggregation type. ex: "field_42__empty_count" for the empty count
aggregation of field with id 42.
Returns a dict of aggregation for given (field, aggregation_type) couple list.
The dict keys are field names and value are aggregation values. The total is
included in result if the with_total is specified.
:param view: The view to get the field aggregation for.
:param aggregations: A list of (field_instance, aggregation_type).
@ -857,12 +1111,13 @@ class ViewHandler:
query from, if not specified then the model will be generated
automatically.
:param with_total: Whether the total row count should be returned in the
result.
result.
:param search: the search string to considerate.
:raises FieldAggregationNotSupported: When the view type doesn't support
field aggregation.
:raises FieldNotInTable: When the field does not belong to the specified
:raises FieldNotInTable: When one of the field doesn't belong to the specified
view.
:returns: A dict of aggregation value
:returns: A dict of aggregation values
"""
if model is None:
@ -878,12 +1133,16 @@ class ViewHandler:
f"Field aggregation is not supported for {view_type.type} views."
)
# Apply filters to have accurate aggregation
# Apply filters and search to have accurate aggregations
if view_type.can_filter:
queryset = self.apply_filters(view, queryset)
if search is not None:
queryset = queryset.search_all_fields(search)
aggregation_dict = {}
for (field_instance, aggregation_type_name) in aggregations:
field_name = field_instance.db_column
# Check whether the field belongs to the table.
if field_instance.table_id != view.table_id:
@ -892,24 +1151,20 @@ class ViewHandler:
f"{view.table.id}."
)
# Prepare data for .get_aggregation call
field = model._field_objects[field_instance.id]["field"]
field_name = field_instance.db_column
model_field = model._meta.get_field(field_name)
aggregation_type = view_aggregation_type_registry.get(aggregation_type_name)
# Add the aggregation for the field
aggregation_dict[
f"{field_name}__{aggregation_type_name}"
] = aggregation_type.get_aggregation(field_name, model_field, field)
aggregation_dict[field_name] = aggregation_type.get_aggregation(
field_name, model_field, field
)
# Add total to allow further calculation on the client if required
if with_total:
# Add total to allow further calculation on the client
aggregation_dict["total"] = Count("id", distinct=True)
result = queryset.aggregate(**aggregation_dict)
return result
return queryset.aggregate(**aggregation_dict)
def rotate_view_slug(self, user, view):
"""

View file

@ -1,4 +1,4 @@
from typing import Callable, Union, List
from typing import TYPE_CHECKING, Callable, Union, List, Iterable, Tuple
from django.contrib.auth.models import User as DjangoUser
from django.db import models as django_models
@ -21,6 +21,9 @@ from baserow.core.registry import (
)
from baserow.contrib.database.fields import models as field_models
if TYPE_CHECKING:
from baserow.contrib.database.views.models import View
from .exceptions import (
ViewTypeAlreadyRegistered,
ViewTypeDoesNotExist,
@ -400,6 +403,52 @@ class ViewType(
"`get_hidden_field_options`"
)
def get_aggregations(
    self, view: "View"
) -> Iterable[Tuple[django_models.Field, str]]:
    """
    Should return the aggregation list for the specified view.

    Returns a list of tuple (Field, aggregation_type).

    :param view: The view to get the aggregations for.
    :raises NotImplementedError: When the view type declares aggregation
        support but doesn't override this method.
    """

    raise NotImplementedError(
        "If the view supports field aggregation it must implement "
        "`get_aggregations` method."
    )
def after_field_value_update(
    self, updated_fields: Union[Iterable[field_models.Field], field_models.Field]
):
    """
    Triggered for each field table value modification. This method is generally
    called after a row modification, creation, deletion and is called for each
    directly or indirectly modified field value. The method can be called multiple
    times for an event but with different fields. This hook gives a view type the
    opportunity to react on any value change for a field.

    Default implementation is a no-op; view types override it when needed.

    :param updated_fields: A single field or a list of affected fields.
    """
def after_field_update(
    self, updated_fields: Union[Iterable[field_models.Field], field_models.Field]
):
    """
    Triggered after a field has been updated, created, deleted. This method is
    called for each group of fields directly or indirectly modified this way.
    This hook gives a view type the opportunity to react on any change for a field.

    Default implementation is a no-op; view types override it when needed.

    :param updated_fields: A single field or a list of modified fields.
    """
def after_filter_update(self, view: "View"):
    """
    Triggered after a view filter change. This hook gives a view type the
    opportunity to react on any filter update.

    Default implementation is a no-op; view types override it when needed.

    :param view: The view whose filters have changed.
    """
class ViewTypeRegistry(
APIUrlsRegistryMixin, CustomFieldsRegistryMixin, ModelRegistryMixin, Registry

View file

@ -1,3 +1,4 @@
from collections import defaultdict
from django.urls import path, include
from rest_framework.serializers import PrimaryKeyRelatedField
@ -75,6 +76,8 @@ class GridViewType(ViewType):
"width": field_option.width,
"hidden": field_option.hidden,
"order": field_option.order,
"aggregation_type": field_option.aggregation_type,
"aggregation_raw_type": field_option.aggregation_raw_type,
}
)
@ -135,10 +138,24 @@ class GridViewType(ViewType):
"""
fields_dict = {field.id: field for field in fields}
for field_id, options in field_options.items():
field = fields_dict.get(int(field_id), None)
aggregation_raw_type = options.get("aggregation_raw_type")
if aggregation_raw_type and field:
try:
# Invalidate cache if new aggregation raw type has changed
prev_options = GridViewFieldOptions.objects.only(
"aggregation_raw_type"
).get(field=field, grid_view=view)
if prev_options.aggregation_raw_type != aggregation_raw_type:
ViewHandler().clear_aggregation_cache(view, field.db_column)
except GridViewFieldOptions.DoesNotExist:
pass
# Checks if the aggregation raw type is compatible with the field type
aggregation_type = view_aggregation_type_registry.get(
aggregation_raw_type
)
@ -152,27 +169,35 @@ class GridViewType(ViewType):
Check field option aggregation_raw_type compatibility with the new field type.
"""
field_options = GridViewFieldOptions.objects.filter(field=field).select_related(
"grid_view"
field_options = (
GridViewFieldOptions.objects.filter(field=field)
.exclude(aggregation_raw_type="")
.select_related("grid_view")
)
for field_option in field_options:
raw_type = field_option.aggregation_raw_type
if raw_type:
aggregation_type = view_aggregation_type_registry.get(raw_type)
view_handler = ViewHandler()
if not aggregation_type.field_is_compatible(field):
# The field has an aggregation and the type is not compatible with
# the new field, so we need to clean the aggregation.
ViewHandler().update_field_options(
view=field_option.grid_view,
field_options={
field.id: {
"aggregation_type": "",
"aggregation_raw_type": "",
}
},
)
for field_option in field_options:
aggregation_type = view_aggregation_type_registry.get(
field_option.aggregation_raw_type
)
view_handler.clear_aggregation_cache(
field_option.grid_view, field.db_column
)
if not aggregation_type.field_is_compatible(field):
# The field has an aggregation and the type is not compatible with
# the new field, so we need to clean the aggregation.
view_handler.update_field_options(
view=field_option.grid_view,
field_options={
field.id: {
"aggregation_type": "",
"aggregation_raw_type": "",
}
},
)
def get_visible_field_options_in_order(self, grid_view):
return (
@ -184,6 +209,58 @@ class GridViewType(ViewType):
def get_hidden_field_options(self, grid_view):
return grid_view.get_field_options(create_if_missing=False).filter(hidden=True)
def get_aggregations(self, grid_view):
    """
    Returns the (Field, aggregation_type) list computed from the field options
    for the specified view. Options without a raw aggregation type configured
    are ignored.
    """

    options_with_aggregation = (
        GridViewFieldOptions.objects.filter(grid_view=grid_view)
        .exclude(aggregation_raw_type="")
        .select_related("field")
    )
    return [
        (opt.field, opt.aggregation_raw_type) for opt in options_with_aggregation
    ]
def after_field_value_update(self, updated_fields):
    """
    When a field value changes, we need to invalidate the aggregation cache
    for this field on every grid view that aggregates it.

    :param updated_fields: The list of fields whose values changed.
    """

    # Group the db column names to clear per grid view id so that each view's
    # cache is cleared in a single call.
    to_clear = defaultdict(list)
    view_map = {}

    # NOTE(review): one query per updated field — presumably acceptable here
    # because the field lists are small; confirm before batching.
    for field in updated_fields:
        field_options = (
            GridViewFieldOptions.objects.filter(field=field)
            .exclude(aggregation_raw_type="")
            .select_related("grid_view")
        )
        for options in field_options:
            to_clear[options.grid_view.id].append(field.db_column)
            view_map[options.grid_view.id] = options.grid_view

    view_handler = ViewHandler()
    for view_id, names in to_clear.items():
        # "total" is cleared as well because the row count may have changed.
        view_handler.clear_aggregation_cache(view_map[view_id], names + ["total"])
def after_field_update(self, updated_fields):
    """
    When a field configuration is changed, we need to invalidate the cache for
    the corresponding aggregations also.

    :param updated_fields: The list of modified fields.
    """

    # A configuration change has the same cache impact as a value change.
    self.after_field_value_update(updated_fields)
def after_filter_update(self, grid_view):
    """
    If the view filters change we also need to invalidate the aggregation
    cache for all fields of this view, because every aggregation is computed
    over the filtered row set.

    :param grid_view: The grid view whose filters changed.
    """

    ViewHandler().clear_full_aggregation_cache(grid_view)
class GalleryViewType(ViewType):
type = "gallery"

View file

@ -1,7 +1,9 @@
from decimal import Decimal
from typing import List, Dict, Any
import pytest
from django.shortcuts import reverse
from django.core.cache import cache
from rest_framework import serializers
from rest_framework.fields import Field
from rest_framework.status import (
@ -537,7 +539,7 @@ def test_field_aggregation(api_client, data_fixture):
}
)
# Count empty "Color" field
# Count empty boolean field
response = api_client.get(
url + f"?type=empty_count",
**{"HTTP_AUTHORIZATION": f"JWT {token}"},
@ -574,7 +576,7 @@ def test_field_aggregation(api_client, data_fixture):
assert response_json == {"value": 1, "total": 4}
# Does it works with filter
# Does it work with filter
data_fixture.create_view_filter(
view=grid, field=number_field, type="higher_than", value="10"
)
@ -590,6 +592,705 @@ def test_field_aggregation(api_client, data_fixture):
assert response_json == {"value": 1}
@pytest.mark.django_db
def test_view_aggregations(api_client, data_fixture):
    """
    End-to-end test of the grid view `field-aggregations` endpoint: error
    handling (missing view, wrong user, missing token), empty results, cache
    population, cache reuse, cache invalidation on row creation and on view
    filter changes, the `include=total` parameter and the `search` parameter
    (which must bypass the cache entirely).
    """
    user, token = data_fixture.create_user_and_token(
        email="test@test.nl", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    text_field = data_fixture.create_text_field(
        table=table, order=0, name="Color", text_default="white"
    )
    number_field = data_fixture.create_number_field(
        table=table, order=1, name="Horsepower"
    )
    boolean_field = data_fixture.create_boolean_field(
        table=table, order=2, name="For sale"
    )

    grid = data_fixture.create_grid_view(table=table)
    grid_2 = data_fixture.create_grid_view()

    # Test missing grid view
    url = reverse(
        "api:database:views:grid:field-aggregations",
        kwargs={"view_id": 9999},
    )
    response = api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_404_NOT_FOUND
    assert response.json()["error"] == "ERROR_GRID_DOES_NOT_EXIST"

    # Test user not authorized
    url = reverse(
        "api:database:views:grid:field-aggregations",
        kwargs={"view_id": grid_2.id},
    )
    response = api_client.get(
        url + "?type=empty_count",
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response.json()["error"] == "ERROR_USER_NOT_IN_GROUP"

    # Test missing auth token
    url = reverse(
        "api:database:views:grid:field-aggregations",
        kwargs={"view_id": grid.id},
    )
    response = api_client.get(url)
    assert response.status_code == HTTP_401_UNAUTHORIZED

    url = reverse(
        "api:database:views:grid:field-aggregations",
        kwargs={"view_id": grid.id},
    )

    # Test normal response with no data and no aggregation
    response = api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_200_OK
    response_json = response.json()
    assert response_json == {}

    # The text field option deliberately has no aggregation configured; the
    # number and boolean fields each get one.
    data_fixture.create_grid_view_field_option(
        grid_view=grid,
        field=text_field,
        aggregation_type="",
        aggregation_raw_type="",
    )
    data_fixture.create_grid_view_field_option(
        grid_view=grid,
        field=number_field,
        aggregation_type="whatever",
        aggregation_raw_type="sum",
    )
    data_fixture.create_grid_view_field_option(
        grid_view=grid,
        field=boolean_field,
        aggregation_type="whatever",
        aggregation_raw_type="empty_count",
    )

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") is None
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") is None
    assert cache.get(f"aggregation_value__{grid.id}_{boolean_field.db_column}") is None
    assert (
        cache.get(f"aggregation_version__{grid.id}_{boolean_field.db_column}") is None
    )

    # Test normal response with no data and no cache
    response = api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_200_OK
    response_json = response.json()
    assert response_json == {number_field.db_column: None, boolean_field.db_column: 0}

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": None,
        "version": 1,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") is None
    assert cache.get(f"aggregation_value__{grid.id}_{boolean_field.db_column}") == {
        "value": 0,
        "version": 1,
    }
    assert (
        cache.get(f"aggregation_version__{grid.id}_{boolean_field.db_column}") is None
    )

    # Test normal response that use cache
    cache.set(
        f"aggregation_value__{grid.id}_{number_field.db_column}",
        {"value": "sentinel", "version": 1},
    )
    cache.set(
        f"aggregation_value__{grid.id}_{boolean_field.db_column}",
        {"value": "sentinel", "version": 1},
    )

    response = api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_200_OK
    response_json = response.json()
    # Sentinels are returned verbatim, proving the cached values were used.
    assert response_json == {
        number_field.db_column: "sentinel",
        boolean_field.db_column: "sentinel",
    }

    cache.set(
        f"aggregation_value__{grid.id}_{number_field.db_column}",
        {"value": "sentinel", "version": 1},
    )
    cache.set(
        f"aggregation_value__{grid.id}_{boolean_field.db_column}",
        {"value": "sentinel", "version": 3},
    )
    cache.set(
        f"aggregation_version__{grid.id}_{boolean_field.db_column}",
        3,
    )

    # Add data through the API to trigger cache update
    api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{text_field.id}": "Green",
            f"field_{number_field.id}": 10,
            f"field_{boolean_field.id}": True,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    # Row creation only bumps the version key; the stale value stays in place
    # until the next read.
    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": "sentinel",
        "version": 1,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 2
    assert cache.get(f"aggregation_value__{grid.id}_{boolean_field.db_column}") == {
        "value": "sentinel",
        "version": 3,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{boolean_field.db_column}") == 4

    api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{text_field.id}": "",
            f"field_{number_field.id}": 0,
            f"field_{boolean_field.id}": False,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{text_field.id}": None,
            f"field_{number_field.id}": 1200,
            f"field_{boolean_field.id}": False,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    # Test normal response with data
    response = api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_200_OK
    response_json = response.json()
    assert response_json == {number_field.db_column: 1210.0, boolean_field.db_column: 2}

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": 1210.0,
        "version": 4,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 4
    assert cache.get(f"aggregation_value__{grid.id}_{boolean_field.db_column}") == {
        "value": 2,
        "version": 6,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{boolean_field.db_column}") == 6

    # with total
    response = api_client.get(
        url + "?include=total",
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json == {
        number_field.db_column: 1210.0,
        boolean_field.db_column: 2,
        "total": 3,
    }

    # Does it work with filter
    response = api_client.get(
        url + "?include=total&search=GREE",
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_200_OK
    response_json = response.json()
    assert response_json == {
        number_field.db_column: 10.0,
        boolean_field.db_column: 0,
        "total": 1,
    }

    # But cache shouldn't be modified after a search as we don't use the cache
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 4
    assert cache.get(f"aggregation_version__{grid.id}_{boolean_field.db_column}") == 6

    # Does it work with filter (use API to trigger cache update)
    response = api_client.post(
        reverse("api:database:views:list_filters", kwargs={"view_id": grid.id}),
        {"field": number_field.id, "type": "higher_than", "value": "10"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    # Cache should be invalidated on filter change
    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1210),
        "version": 4,
    }
    assert cache.get(f"aggregation_value__{grid.id}_{boolean_field.db_column}") == {
        "value": 2,
        "version": 6,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 5
    assert cache.get(f"aggregation_version__{grid.id}_{boolean_field.db_column}") == 7

    response = api_client.get(
        url + "?include=total",
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.status_code == HTTP_200_OK
    response_json = response.json()
    assert response_json == {
        number_field.db_column: 1200.0,
        boolean_field.db_column: 1,
        "total": 1,
    }

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1200),
        "version": 5,
    }
    assert cache.get(f"aggregation_value__{grid.id}_{boolean_field.db_column}") == {
        "value": 1,
        "version": 7,
    }
@pytest.mark.django_db
def test_view_aggregations_cache_invalidation_with_dependant_fields(
    api_client, data_fixture
):
    """
    Here we want a complex situation where we need to invalidate the cache of a
    dependant field in another table. Should be the most extreme scenario.
    We create two tables with a link row field from table2 to table1, a lookup
    field on table 2 to the number field on table 1 and a formula that sums the
    values of table 2's lookup field.
    """
    user, token = data_fixture.create_user_and_token(
        email="test@test.nl", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    table2 = data_fixture.create_database_table(user=user, database=table.database)
    number_field = data_fixture.create_number_field(
        table=table, order=1, name="Horsepower"
    )
    grid = data_fixture.create_grid_view(table=table)
    grid2 = data_fixture.create_grid_view(table=table2)

    linkrowfield = FieldHandler().create_field(
        user,
        table2,
        "link_row",
        name="linkrowfield",
        link_row_table=table,
    )
    # The lookup field is only referenced by name (in the formula below), so
    # the return value is intentionally not kept.
    FieldHandler().create_field(
        user,
        table2,
        "lookup",
        name="lookup_field",
        through_field_id=linkrowfield.id,
        target_field_id=number_field.id,
    )
    sum_formula_on_lookup_field = FieldHandler().create_field(
        user,
        table2,
        "formula",
        name="sum",
        formula='sum(field("lookup_field"))',
    )

    # Create some aggregations
    data_fixture.create_grid_view_field_option(
        grid_view=grid,
        field=number_field,
        aggregation_type="whatever",
        aggregation_raw_type="sum",
    )
    data_fixture.create_grid_view_field_option(
        grid_view=grid2,
        field=sum_formula_on_lookup_field,
        aggregation_type="whatever",
        aggregation_raw_type="sum",
    )

    # Define some utilities functions
    def add_value_to_table1(value):
        # Add data through the API to trigger cache update
        response = api_client.post(
            reverse("api:database:rows:list", kwargs={"table_id": table.id}),
            {
                f"field_{number_field.id}": value,
            },
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
        return response.json()

    def update_value_of_table1(row, value):
        api_client.patch(
            reverse(
                "api:database:rows:item",
                kwargs={"table_id": table.id, "row_id": row["id"]},
            ),
            {f"field_{number_field.id}": value},
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )

    def delete_row_of_table1(row):
        api_client.delete(
            reverse(
                "api:database:rows:item",
                kwargs={"table_id": table.id, "row_id": row["id"]},
            ),
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )

    def add_link_to_table2(links):
        # Add data through the API to trigger cache update
        response = api_client.post(
            reverse("api:database:rows:list", kwargs={"table_id": table2.id}),
            {
                f"field_{linkrowfield.id}": [link["id"] for link in links],
            },
            format="json",
            HTTP_AUTHORIZATION=f"JWT {token}",
        )
        return response.json()

    def check_table_2_aggregation_values(value_to_check, ident):
        response = api_client.get(
            url2,
            **{"HTTP_AUTHORIZATION": f"JWT {token}"},
        )
        response_json = response.json()

        assert response_json == value_to_check, ident

    url = reverse(
        "api:database:views:grid:field-aggregations",
        kwargs={"view_id": grid.id},
    )
    url2 = reverse(
        "api:database:views:grid:field-aggregations",
        kwargs={"view_id": grid2.id},
    )

    row1 = add_value_to_table1(1)
    row2 = add_value_to_table1(10)
    row3 = add_value_to_table1(100)
    row4 = add_value_to_table1(1000)

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") is None
    assert (
        cache.get(
            f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        is None
    )

    api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1111),
        "version": 5,
    }
    assert (
        cache.get(
            f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        is None
    )

    check_table_2_aggregation_values(
        {sum_formula_on_lookup_field.db_column: None}, "with no link"
    )

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1111),
        "version": 5,
    }
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {"value": None, "version": 5}

    cache.set(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}",
        {"value": "sentinel", "version": 0},
    )

    # Add few links
    add_link_to_table2([row1, row2])

    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {"value": "sentinel", "version": 0}

    add_link_to_table2([row2, row3])
    add_link_to_table2([row3, row4])
    add_link_to_table2([row4])

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1111),
        "version": 5,
    }

    check_table_2_aggregation_values(
        {sum_formula_on_lookup_field.db_column: 2221}, "after link addition"
    )

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1111),
        "version": 5,
    }
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(2221),
        "version": 17,
    }

    update_value_of_table1(row2, 10000)

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal(1111),
        "version": 5,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 6
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(2221),
        "version": 17,
    }
    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 18
    )

    check_table_2_aggregation_values(
        {sum_formula_on_lookup_field.db_column: 22201}, "after table 1 value update"
    )

    # Delete row3 from table1 (reuse the helper defined above).
    delete_row_of_table1(row3)

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal("1111"),
        "version": 5,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 7

    # Should increment cache version
    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 19
    )

    check_table_2_aggregation_values(
        {sum_formula_on_lookup_field.db_column: 22001}, "after row deletion"
    )

    assert cache.get(f"aggregation_value__{grid.id}_{number_field.db_column}") == {
        "value": Decimal("1111"),
        "version": 5,
    }
    assert cache.get(f"aggregation_version__{grid.id}_{number_field.db_column}") == 7
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(22001),
        "version": 19,
    }

    # Restore delete row
    api_client.patch(
        reverse(
            "api:trash:restore",
        ),
        {
            "trash_item_type": "row",
            "trash_item_id": row3["id"],
            "parent_trash_item_id": table.id,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 20
    )

    check_table_2_aggregation_values(
        {sum_formula_on_lookup_field.db_column: 22201}, "after row restoration"
    )

    # Should store the new value/version in cache
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(22201),
        "version": 20,
    }

    # Update number field
    api_client.patch(
        reverse(
            "api:database:fields:item",
            kwargs={"field_id": number_field.id},
        ),
        {"number_decimal_places": 1},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    # Cache version should be incremented
    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 21
    )

    check_table_2_aggregation_values(
        {sum_formula_on_lookup_field.db_column: 22201}, "after field modification"
    )

    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(22201),
        "version": 21,
    }

    # Delete number field
    api_client.delete(
        reverse(
            "api:database:fields:item",
            kwargs={"field_id": number_field.id},
        ),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(22201),
        "version": 21,
    }
    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 22
    )

    check_table_2_aggregation_values({}, "after field deletion")

    # No modification as the field and the aggregation don't exist
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(22201),
        "version": 21,
    }
    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 22
    )

    # Restore deleted field
    api_client.patch(
        reverse(
            "api:trash:restore",
        ),
        {
            "trash_item_type": "field",
            "trash_item_id": number_field.id,
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )

    response = api_client.get(
        url,
        **{"HTTP_AUTHORIZATION": f"JWT {token}"},
    )
    assert response.json() == {number_field.db_column: 11101}

    # The field aggregation has been automatically deleted so no aggregations anymore
    check_table_2_aggregation_values({}, "after field restoration")

    # Still no modifications
    assert cache.get(
        f"aggregation_value__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
    ) == {
        "value": Decimal(22201),
        "version": 21,
    }
    assert (
        cache.get(
            f"aggregation_version__{grid2.id}_{sum_formula_on_lookup_field.db_column}"
        )
        == 22
    )
@pytest.mark.django_db
def test_patch_grid_view_field_options(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(

View file

@ -58,37 +58,41 @@ def test_view_empty_count_aggregation(data_fixture):
text_field,
"empty_count",
),
(
boolean_field,
"empty_count",
),
(
number_field,
"empty_count",
),
],
)
assert result[f"field_{text_field.id}"] == 2
assert result[f"field_{boolean_field.id}"] == 2
assert result[f"field_{number_field.id}"] == 1
result = view_handler.get_field_aggregations(
grid_view,
[
(
text_field,
"not_empty_count",
),
(
boolean_field,
"empty_count",
),
(
boolean_field,
"not_empty_count",
),
(
number_field,
"empty_count",
),
(
number_field,
"not_empty_count",
),
],
)
assert result[f"field_{text_field.id}__empty_count"] == 2
assert result[f"field_{text_field.id}__not_empty_count"] == 2
assert result[f"field_{boolean_field.id}__empty_count"] == 2
assert result[f"field_{boolean_field.id}__not_empty_count"] == 2
assert result[f"field_{number_field.id}__empty_count"] == 1
assert result[f"field_{number_field.id}__not_empty_count"] == 3
assert result[f"field_{text_field.id}"] == 2
assert result[f"field_{boolean_field.id}"] == 2
assert result[f"field_{number_field.id}"] == 3
result = view_handler.get_field_aggregations(
grid_view,
@ -123,6 +127,13 @@ def test_view_empty_count_aggregation_for_interesting_table(data_fixture):
)
)
result_empty = view_handler.get_field_aggregations(
grid_view, aggregation_query, model=model, with_total=True
)
aggregation_query = []
for field in model._field_objects.values():
aggregation_query.append(
(
field["field"],
@ -130,16 +141,15 @@ def test_view_empty_count_aggregation_for_interesting_table(data_fixture):
)
)
result = view_handler.get_field_aggregations(
grid_view, aggregation_query, model=model, with_total=True
result_not_emtpy = view_handler.get_field_aggregations(
grid_view, aggregation_query, model=model
)
for field in model._field_objects.values():
field_id = field["field"].id
assert (
result[f"field_{field_id}__empty_count"]
+ result[f"field_{field_id}__not_empty_count"]
== result["total"]
result_empty[field["field"].db_column]
+ result_not_emtpy[field["field"].db_column]
== result_empty["total"]
)
@ -178,9 +188,9 @@ def test_view_unique_count_aggregation_for_interesting_table(data_fixture):
field_type = field["type"].type
if field_type in ["url", "email", "rating", "phone_number"]:
assert result[f"field_{field_id}__unique_count"] == 2
assert result[f"field_{field_id}"] == 2
else:
assert result[f"field_{field_id}__unique_count"] == 1
assert result[f"field_{field_id}"] == 1
@pytest.mark.django_db
@ -199,13 +209,13 @@ def test_view_number_aggregation(data_fixture):
for i in range(30):
model.objects.create(
**{
f"field_{number_field.id}": random.randint(0, 100),
number_field.db_column: random.randint(0, 100),
}
)
model.objects.create(
**{
f"field_{number_field.id}": None,
number_field.db_column: None,
}
)
@ -216,45 +226,87 @@ def test_view_number_aggregation(data_fixture):
number_field,
"min",
),
],
)
assert result[number_field.db_column] == 1
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"max",
),
],
)
assert result[number_field.db_column] == 94
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"sum",
),
],
)
assert result[number_field.db_column] == 1546
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"average",
),
],
)
assert round(result[number_field.db_column], 2) == Decimal("51.53")
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"median",
),
],
)
assert round(result[number_field.db_column], 2) == Decimal("52.5")
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"std_dev",
),
],
)
assert round(result[number_field.db_column], 2) == Decimal("26.73")
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"variance",
),
],
)
assert round(result[number_field.db_column], 2) == Decimal("714.72")
result = view_handler.get_field_aggregations(
grid_view,
[
(
number_field,
"decile",
),
],
)
assert result[f"field_{number_field.id}__min"] == 1
assert result[f"field_{number_field.id}__max"] == 94
assert result[f"field_{number_field.id}__sum"] == 1546
assert round(result[f"field_{number_field.id}__median"], 2) == Decimal("52.5")
assert round(result[f"field_{number_field.id}__average"], 2) == Decimal("51.53")
assert round(result[f"field_{number_field.id}__std_dev"], 2) == Decimal("26.73")
assert round(result[f"field_{number_field.id}__variance"], 2) == Decimal("714.72")
assert result[f"field_{number_field.id}__decile"] == [
assert result[number_field.db_column] == [
19.5,
22.8,
33.7,

View file

@ -545,8 +545,8 @@ def test_update_field_options(send_mock, data_fixture):
def test_grid_view_aggregation_type_field_option(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
grid_view = data_fixture.create_grid_view(table=table)
field_1 = data_fixture.create_text_field(table=table)
grid_view = data_fixture.create_grid_view(table=table)
# Fake incompatible field
empty_count = view_aggregation_type_registry.get("empty_count")

View file

@ -24,7 +24,10 @@ def test_import_export_grid_view(data_fixture):
field = data_fixture.create_text_field(table=grid_view.table)
imported_field = data_fixture.create_text_field(table=grid_view.table)
field_option = data_fixture.create_grid_view_field_option(
grid_view=grid_view, field=field
grid_view=grid_view,
field=field,
aggregation_type="whatever",
aggregation_raw_type="empty",
)
view_filter = data_fixture.create_view_filter(
view=grid_view, field=field, value="test", type="equal"
@ -65,6 +68,10 @@ def test_import_export_grid_view(data_fixture):
assert field_option.width == imported_field_option.width
assert field_option.hidden == imported_field_option.hidden
assert field_option.order == imported_field_option.order
assert field_option.aggregation_type == imported_field_option.aggregation_type
assert (
field_option.aggregation_raw_type == imported_field_option.aggregation_raw_type
)
@pytest.mark.django_db

View file

@ -28,6 +28,8 @@
* Fixed a bug that truncated characters for email in the sidebar.
* Fixed a bug that would sometimes cancel multi-cell selection.
* Upgraded node runtime to v16.14.0
* Cache aggregation values to improve performance
* Added new endpoint to get all configured aggregations for a grid view
## Released (2022-03-03 1.9.1)

View file

@ -75,14 +75,24 @@ export default {
required: true,
},
},
data() {
return { pendingValueUpdate: false }
},
computed: {
aggregationType() {
return this.fieldOptions[this.field.id]?.aggregation_type
},
aggregationRawType() {
return this.fieldOptions[this.field.id]?.aggregation_raw_type
},
value() {
if (this.fieldAggregationData[this.field.id] !== undefined) {
const { value } = this.fieldAggregationData[this.field.id]
if (isNaN(value)) {
return null
}
return this.viewAggregationType.getValue(value, {
rowCount: this.rowCount,
field: this.field,
@ -111,21 +121,19 @@ export default {
},
},
watch: {
aggregationType(value) {
aggregationRawType(value) {
if (!value) {
return
}
this.$store.dispatch(
this.storePrefix + 'view/grid/fetchFieldAggregationData',
{
view: this.view,
fieldId: this.field.id,
options: {
aggregation_raw_type: this.viewAggregationType.getRawType(),
aggregation_type: this.viewAggregationType.getType(),
},
}
)
// If an update is already pending, we don't need this one.
if (!this.pendingValueUpdate) {
this.$store.dispatch(
this.storePrefix + 'view/grid/fetchAllFieldAggregationData',
{
view: this.view,
}
)
}
},
},
beforeCreate() {
@ -159,13 +167,19 @@ export default {
values.aggregation_raw_type = selectedAggregation.getRawType()
}
await this.$store.dispatch(
this.storePrefix + 'view/grid/updateFieldOptionsOfField',
{
field: this.field,
values,
}
)
// Prevent the watcher to trigger while value is not yet saved on server
this.pendingValueUpdate = true
try {
await this.$store.dispatch(
this.storePrefix + 'view/grid/updateFieldOptionsOfField',
{
field: this.field,
values,
}
)
} finally {
this.pendingValueUpdate = false
}
},
},
}

View file

@ -101,14 +101,20 @@ export default (client) => {
fetchPublicViewInfo(viewSlug) {
return client.get(`/database/views/grid/${viewSlug}/public/info/`)
},
fetchFieldAggregation(gridId, fieldId, rawType) {
fetchFieldAggregations({ gridId, search, signal = null }) {
const params = new URLSearchParams()
params.append('type', rawType)
return client.get(
`/database/views/grid/${gridId}/aggregation/${fieldId}/`,
{ params }
)
if (search) {
params.append('search', search)
}
const config = { params }
if (signal !== null) {
config.signal = signal
}
return client.get(`/database/views/grid/${gridId}/aggregations/`, config)
},
}
}

View file

@ -490,6 +490,9 @@ let lastRefreshRequest = null
let lastRefreshRequestSource = null
let lastSource = null
// We want to cancel previous aggregation request before creating a new one.
const lastAggregationRequest = { request: null, controller: null }
export const actions = {
/**
* This action calculates which rows we would like to have in the buffer based on
@ -888,9 +891,28 @@ export const actions = {
* backend with the changed values. If the request fails the action is reverted.
*/
async updateFieldOptionsOfField(
{ commit, getters },
{ commit, getters, dispatch },
{ field, values, oldValues, readOnly = false }
) {
const previousOptions = getters.getAllFieldOptions[field.id]
let needAggregationValueUpdate = false
/**
* If the aggregation raw type has changed, we delete the corresponding the
* aggregation value from the store.
*/
if (
Object.prototype.hasOwnProperty.call(values, 'aggregation_raw_type') &&
values.aggregation_raw_type !== previousOptions.aggregation_raw_type
) {
needAggregationValueUpdate = true
commit('SET_FIELD_AGGREGATION_DATA', { fieldId: field.id, value: null })
commit('SET_FIELD_AGGREGATION_DATA_LOADING', {
fieldId: field.id,
value: true,
})
}
commit('UPDATE_FIELD_OPTIONS_OF_FIELD', {
fieldId: field.id,
values,
@ -912,6 +934,10 @@ export const actions = {
values: oldValues,
})
throw error
} finally {
if (needAggregationValueUpdate && values.aggregation_type) {
dispatch('fetchAllFieldAggregationData', { view: { id: gridId } })
}
}
}
},
@ -919,7 +945,7 @@ export const actions = {
* Updates the field options of a given field in the store. So no API request to
* the backend is made.
*/
setFieldOptionsOfField({ commit }, { field, values }) {
setFieldOptionsOfField({ commit, getters }, { field, values }) {
commit('UPDATE_FIELD_OPTIONS_OF_FIELD', {
fieldId: field.id,
values,
@ -956,61 +982,100 @@ export const actions = {
forceUpdateAllFieldOptions({ commit }, fieldOptions) {
commit('UPDATE_ALL_FIELD_OPTIONS', fieldOptions)
},
async fetchFieldAggregationData({ commit, getters }, { view, fieldId }) {
const options = getters.getAllFieldOptions[fieldId]
/**
* Fetch all field aggregation data from the server for this view. Set loading state
* to true while doing the query. Do nothing if this is a public view or if there is
* no aggregation at all. If the query goes in error, the values are set to `null`
* to prevent wrong information.
* If a request is already in progress, it is aborted in favour of the new one.
*/
async fetchAllFieldAggregationData({ getters, commit }, { view }) {
const isPublic = getters.isPublic
const search = getters.getActiveSearchTerm
if (!options?.aggregation_raw_type || isPublic) {
if (isPublic) {
return
}
commit('SET_FIELD_AGGREGATION_DATA_LOADING', {
fieldId,
value: true,
})
commit('SET_FIELD_AGGREGATION_DATA', {
fieldId,
value: null,
})
try {
const {
data: { value },
} = await GridService(this.$client).fetchFieldAggregation(
view.id,
fieldId,
options.aggregation_raw_type
)
commit('SET_FIELD_AGGREGATION_DATA', {
fieldId,
value,
})
} catch (e) {
// Emptied the value
commit('SET_FIELD_AGGREGATION_DATA', {
fieldId,
value: null,
})
throw e
} finally {
commit('SET_FIELD_AGGREGATION_DATA_LOADING', {
fieldId,
value: false,
})
}
},
fetchAllFieldAggregationData({ dispatch, getters }, { view }) {
const fieldOptions = getters.getAllFieldOptions
return Promise.all(
Object.keys(fieldOptions).map((fieldId) => {
return dispatch('fetchFieldAggregationData', {
view,
let atLeastOneAggregation = false
Object.entries(fieldOptions).forEach(([fieldId, options]) => {
if (options.aggregation_raw_type) {
commit('SET_FIELD_AGGREGATION_DATA_LOADING', {
fieldId,
value: true,
})
atLeastOneAggregation = true
}
})
if (!atLeastOneAggregation) {
return
}
try {
if (lastAggregationRequest.request !== null) {
lastAggregationRequest.controller.abort()
}
lastAggregationRequest.controller = new AbortController()
lastAggregationRequest.request = GridService(
this.$client
).fetchFieldAggregations({
gridId: view.id,
search,
signal: lastAggregationRequest.controller.signal,
})
)
const { data } = await lastAggregationRequest.request
lastAggregationRequest.request = null
Object.entries(fieldOptions).forEach(([fieldId, options]) => {
if (options.aggregation_raw_type) {
commit('SET_FIELD_AGGREGATION_DATA', {
fieldId,
value: data[`field_${fieldId}`],
})
}
})
Object.entries(fieldOptions).forEach(([fieldId, options]) => {
if (options.aggregation_raw_type) {
commit('SET_FIELD_AGGREGATION_DATA_LOADING', {
fieldId,
value: false,
})
}
})
} catch (error) {
if (!axios.isCancel(error)) {
lastAggregationRequest.request = null
// Emptied the values
Object.entries(fieldOptions).forEach(([fieldId, options]) => {
if (options.aggregation_raw_type) {
commit('SET_FIELD_AGGREGATION_DATA', {
fieldId,
value: null,
})
}
})
// Remove loading state
Object.entries(fieldOptions).forEach(([fieldId, options]) => {
if (options.aggregation_raw_type) {
commit('SET_FIELD_AGGREGATION_DATA_LOADING', {
fieldId,
value: false,
})
}
})
throw error
}
}
},
/**
* Updates the order of all the available field options. The provided order parameter

View file

@ -177,6 +177,9 @@ export class NotEmptyCountViewAggregationType extends ViewAggregationType {
}
getValue(value, { rowCount }) {
if (rowCount === 0) {
return null
}
return rowCount - value
}
@ -227,6 +230,9 @@ export class CheckedCountViewAggregationType extends ViewAggregationType {
}
getValue(value, { rowCount }) {
if (rowCount === 0) {
return null
}
return rowCount - value
}
@ -279,6 +285,9 @@ export class EmptyPercentageViewAggregationType extends ViewAggregationType {
}
getValue(value, { rowCount }) {
if (rowCount === 0) {
return null
}
return `${Math.round((value / rowCount) * 100)}%`
}
@ -331,6 +340,9 @@ export class NotEmptyPercentageViewAggregationType extends ViewAggregationType {
}
getValue(value, { rowCount }) {
if (rowCount === 0) {
return null
}
return `${Math.round(((rowCount - value) / rowCount) * 100)}%`
}
@ -363,6 +375,9 @@ export class NotCheckedPercentageViewAggregationType extends ViewAggregationType
}
getValue(value, { rowCount }) {
if (rowCount === 0) {
return null
}
return `${Math.round((value / rowCount) * 100)}%`
}
@ -395,6 +410,9 @@ export class CheckedPercentageViewAggregationType extends ViewAggregationType {
}
getValue(value, { rowCount }) {
if (rowCount === 0) {
return null
}
return `${Math.round(((rowCount - value) / rowCount) * 100)}%`
}

View file

@ -133,6 +133,15 @@ export class MockServer {
}
}
// Register a one-shot mock for the grid view "all aggregations" endpoint.
// When `error` is set the request answers HTTP 500 once; otherwise it
// answers 200 once with `result` as the response payload.
getAllFieldAggregationData(viewId, result, error = false) {
  const handler = this.mock.onGet(
    `/database/views/grid/${viewId}/aggregations/`
  )
  if (error) {
    handler.replyOnce(500)
    return
  }
  handler.replyOnce(200, result)
}
// Remove every registered mock handler so each test can start from a
// clean slate without responses leaking in from a previous test.
resetMockEndpoints() {
this.mock.reset()
}

View file

@ -64,8 +64,8 @@ describe('Field footer component', () => {
expect(wrapper2.element).toMatchSnapshot()
mockServer.getFieldAggregationData(view.id, 2, 'not_empty_count', {
value: 256,
mockServer.getAllFieldAggregationData(view.id, {
field_2: 256,
})
// let's fetch the data for this field
@ -90,8 +90,8 @@ describe('Field footer component', () => {
id: 2,
}
mockServer.getFieldAggregationData(view.id, 3, 'not_empty_count', {
value: 256,
mockServer.getAllFieldAggregationData(view.id, {
field_3: 256,
})
mockServer.updateFieldOptions(view.id, {
3: {
@ -128,8 +128,8 @@ describe('Field footer component', () => {
expect(wrapper.element).toMatchSnapshot()
mockServer.getFieldAggregationData(view.id, 3, 'empty_count', {
value: 10,
mockServer.getAllFieldAggregationData(view.id, {
field_3: 10,
})
mockServer.updateFieldOptions(view.id, {
3: {

View file

@ -5,7 +5,6 @@ import {
ContainsViewFilterType,
} from '@baserow/modules/database/viewFilters'
import { clone } from '@baserow/modules/core/utils/object'
import flushPromises from 'flush-promises'
describe('Grid view store', () => {
let testApp = null
@ -827,18 +826,13 @@ describe('Grid view store', () => {
gridStore.state = () => state
store.registerModule('grid', gridStore)
const fieldId1 = 2
const fieldId2 = 3
const view = {
id: 1,
}
mockServer.getFieldAggregationData(view.id, fieldId1, 'empty_count', {
value: 84,
})
mockServer.getFieldAggregationData(view.id, fieldId2, 'not_empty_count', {
value: 256,
mockServer.getAllFieldAggregationData(view.id, {
field_2: 84,
field_3: 256,
})
await store.dispatch('grid/fetchAllFieldAggregationData', {
@ -856,17 +850,8 @@ describe('Grid view store', () => {
},
})
// Check if a failing endpoint won't prevent other endpoints to update
mockServer.getFieldAggregationData(
view.id,
fieldId1,
'empty_count',
null,
true
)
mockServer.getFieldAggregationData(view.id, fieldId2, 'not_empty_count', {
value: 100,
})
// What if the query fails?
mockServer.getAllFieldAggregationData(view.id, null, true)
testApp.dontFailOnErrorResponses()
await expect(
@ -883,40 +868,7 @@ describe('Grid view store', () => {
},
3: {
loading: false,
value: 100,
},
})
})
test('fetchFieldAggregationData', async () => {
const state = Object.assign(gridStore.state(), {
fieldAggregationData: {},
fieldOptions: { 2: { aggregation_raw_type: 'empty_count' } },
})
gridStore.state = () => state
store.registerModule('grid', gridStore)
const fieldId = 2
const view = {
id: 1,
}
mockServer.getFieldAggregationData(view.id, fieldId, 'empty_count', {
value: 21,
})
await store.dispatch('grid/fetchFieldAggregationData', {
view,
fieldId,
})
await flushPromises()
expect(clone(store.getters['grid/getAllFieldAggregationData'])).toEqual({
2: {
loading: false,
value: 21,
value: null,
},
})
})