@@ -22,10 +22,15 @@ def __exit__(self, exception_type, exception, traceback):
 
 class Query:
 
-    __slots__ = ("table",)
+    __slots__ = ("table", "_frozen_querystrings")
 
-    def __init__(self, table: t.Type[Table]):
+    def __init__(
+        self,
+        table: t.Type[Table],
+        frozen_querystrings: t.Optional[t.Sequence[QueryString]] = None,
+    ):
         self.table = table
+        self._frozen_querystrings = frozen_querystrings
 
     @property
     def engine_type(self) -> str:
@@ -156,6 +161,9 @@ def querystrings(self) -> t.Sequence[QueryString]:
         """
         Calls the correct underlying method, depending on the current engine.
         """
+        if self._frozen_querystrings is not None:
+            return self._frozen_querystrings
+
         engine_type = self.engine_type
         if engine_type == "postgres":
             try:
@@ -174,5 +182,86 @@ def querystrings(self) -> t.Sequence[QueryString]:
 
     ###########################################################################
 
+    def freeze(self) -> FrozenQuery:
+        """
+        This is a performance optimisation when the same query is run
+        repeatedly. For example:
+
+        .. code-block:: python
+
+            TOP_BANDS = Band.select(
+                Band.name
+            ).order_by(
+                Band.popularity,
+                ascending=False
+            ).limit(
+                10
+            ).output(
+                as_json=True
+            ).freeze()
+
+            # In the corresponding view/endpoint of whichever web framework
+            # you're using:
+            async def top_bands(self, request):
+                return await TOP_BANDS.run()
+
+        It means that Piccolo doesn't have to work as hard each time the query
+        is run to generate the corresponding SQL - some of it is cached. If the
+        query is defined within the view/endpoint, it has to generate the SQL
+        from scratch each time.
+
+        Once a query is frozen, you can't apply any more clauses to it
+        (``where``, ``limit``, ``output`` etc).
+
+        Even though ``freeze`` helps with performance, there are limits to
+        how much it can help, as most of the time is still spent waiting for a
+        response from the database. However, for high throughput apps and data
+        science scripts, it's a worthwhile optimisation.
+
+        """
+        querystrings = self.querystrings
+        for querystring in querystrings:
+            querystring.freeze(engine_type=self.engine_type)
+
+        # Copy the query, so we don't store any references to the original.
+        query = self.__class__(
+            table=self.table, frozen_querystrings=self.querystrings
+        )
+
+        if hasattr(self, "limit_delegate"):
+            # Needed for `response_handler`
+            query.limit_delegate = self.limit_delegate.copy()  # type: ignore
+
+        if hasattr(self, "output_delegate"):
+            # Needed for `_process_results`
+            query.output_delegate = self.output_delegate.copy()  # type: ignore
+
+        return FrozenQuery(query=query)
+
+    ###########################################################################
+
     def __str__(self) -> str:
         return "; ".join([i.__str__() for i in self.querystrings])
+
+
+class FrozenQuery:
+    def __init__(self, query: Query):
+        self.query = query
+
+    async def run(self, *args, **kwargs):
+        return await self.query.run(*args, **kwargs)
+
+    def run_sync(self, *args, **kwargs):
+        return self.query.run_sync(*args, **kwargs)
+
+    def __getattr__(self, name: str):
+        if hasattr(self.query, name):
+            raise AttributeError(
+                f"This query is frozen - {name} is only available on "
+                "unfrozen queries."
+            )
+        else:
+            raise AttributeError("Unrecognised attribute name.")
+
+    def __str__(self) -> str:
+        return self.query.__str__()
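
Usage sketch (not part of the diff): the snippet below shows how the freeze() / FrozenQuery API added above is intended to be used. The Band table and the import paths are illustrative assumptions, not taken from this change; only the query-builder calls (select, order_by, limit, output, freeze) come from the docstring in the diff.

    from piccolo.columns import Integer, Varchar
    from piccolo.table import Table


    class Band(Table):
        # Illustrative table definition - any Piccolo table works the same way.
        name = Varchar()
        popularity = Integer()


    # Build and freeze the query once, at module import time, so the SQL is
    # generated a single time rather than on every request.
    TOP_BANDS = (
        Band.select(Band.name)
        .order_by(Band.popularity, ascending=False)
        .limit(10)
        .output(as_json=True)
        .freeze()
    )


    async def top_bands(request):
        # FrozenQuery simply delegates run() / run_sync() to the wrapped query.
        return await TOP_BANDS.run()


    # Further clauses are rejected by FrozenQuery.__getattr__, e.g.
    # TOP_BANDS.where(...) raises:
    #   AttributeError: This query is frozen - where is only available on
    #   unfrozen queries.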