// Copyright 2020 Oxide Computer Company
/*!
 * Detailed end-user documentation for pagination lives in the Dropshot top-
 * level block comment.  Here we discuss some of the design choices.
 *
 * ## Background: patterns for pagination
 *
 * [In their own API design guidelines, Google describes an approach similar to
 * the one we use][1].  There are many ways to implement the page token with
 * many different tradeoffs.  The one described in the Dropshot top-level block
 * comment has a lot of nice properties:
 *
 * * For APIs backed by a database of some kind, it's usually straightforward to
 *   use an existing primary key or other unique, sortable field (or combination
 *   of fields) as the token.
 *
 * * If the client scans all the way through the collection, they will see every
 *   object that existed both before the scan and after the scan and was not
 *   renamed during the scan.  (This isn't true for schemes that use a simple
 *   numeric offset as the token.)
 *
 * * There's no server-side state associated with the token, so it's no problem
 *   if the server crashes between requests or if subsequent requests are
 *   handled by a different instance.  (This isn't true for schemes that store
 *   the result set on the server.)
 *
 * * It's often straightforward to support a reversed-order scan as well -- this
 *   may just be a matter of flipping the inequality used for a database query.
 *
 * * It's easy to support sorting by a single field, and with some care it's
 *   possible to support queries on multiple different fields, even at the same
 *   time.  An API can support listing by any unique, sortable combination of
 *   fields.  For example, say our Projects have a modification time ("mtime")
 *   as well.  We could support listing projects alphabetically by name _or_ in
 *   order of most recently modified.  For the latter, since the modification
 *   time is generally not unique, and the marker must be unique, we'd really be
 *   listing by an ("mtime" descending, "name" ascending) tuple.
 *
 * The interfaces here are intended to support this sort of use case.  For APIs
 * backed by traditional RDBMS databases, see [this post for background on
 * various ways to page through a large set of data][2].  (What we're describing
 * here leverages what this post calls "keyset pagination".)
 *
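 * As a concrete sketch of the ("mtime" descending, "name" ascending) example
 * above, the consumer-defined types might look like this (all of the names
 * here are hypothetical, not part of Dropshot):
 *
 * ```ignore
 * // Parameters accepted when starting a scan of Projects
 * #[derive(Deserialize, JsonSchema, Serialize)]
 * struct ProjectScanParams {
 *     sort: ProjectSort,
 * }
 *
 * #[derive(Deserialize, JsonSchema, Serialize)]
 * enum ProjectSort {
 *     // ascending by unique name
 *     ByNameAscending,
 *     // by (mtime descending, name ascending) so that the marker is unique
 *     ByMtimeDescending,
 * }
 *
 * // Marker recording the last item the client saw, carried in the page token
 * #[derive(Deserialize, Serialize)]
 * enum ProjectPageSelector {
 *     Name(String),
 *     // mtime (e.g., seconds since the epoch) plus name, to keep it unique
 *     MtimeName(u64, String),
 * }
 * ```
 *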
 * Another consideration in designing pagination is whether the token ought to
 * be explicit and meaningful to the user or just an opaque token (likely
 * encoded in some way).  It can be convenient for developers to use APIs where
 * the token is explicitly intended to be one of the fields of the object (e.g.,
 * so that you could list animals starting in the middle by just requesting
 * `?animal_name=moose`), but this puts constraints on the server because
 * clients may come to depend on specific fields being supported and sorted in a
 * certain way.  Dropshot takes the approach of using an encoded token that
 * includes information about the whole scan (e.g., the sort order).  This makes
 * it possible to identify cases that might otherwise result in confusing
 * behavior (e.g., a client lists projects in ascending order, but then asks for
 * the next page in descending order).  The token also includes a version number
 * so that it can be evolved in the future.
 *
 *
 * ## Background: Why paginate HTTP APIs in the first place?
 *
 * Pagination helps ensure that the cost of a request in terms of resource
 * utilization remains O(1) -- that is, it can be bounded above by a constant
 * rather than scaling proportionally with any of the request parameters.  This
 * simplifies utilization monitoring, capacity planning, and scale-out
 * activities for the service, since operators can think of the service in terms
 * of one unit that needs to be scaled up.  (It's still a very complex process,
 * but it would be significantly harder if requests weren't O(1).)
 *
 * Similarly, pagination helps ensure that the time required for a request is
 * O(1) under normal conditions.  This makes it easier to define expectations
 * for service latency and to monitor that latency to determine if those
 * expectations are violated.  Generally, if latency increases, then the service
 * is unhealthy, and a crisp definition of "unhealthy" is important to operate a
 * service with high availability.  If requests weren't O(1), an increase in
 * latency might just reflect a changing workload that's still performing within
 * expectations -- e.g., clients listing larger collections than they were
 * before, but still getting results promptly.  That would make it much harder
 * to see when the service really is unhealthy.
 *
 * Finally, bounding requests to O(1) work is a critical mitigation for common
 * (if basic) denial-of-service (DoS) attacks because it requires that clients
 * consume resources commensurate with the server costs that they're imposing.
 * If a service exposes an API that does work proportional to some parameter,
 * then it's cheap to launch a DoS on the service by just invoking that API with
 * a large parameter.  By contrast, if the client has to do work that scales
 * linearly with the work the server has to do, then the client's costs go up in
 * order to scale up the attack.
 *
 * Along these lines, connections and requests consume finite server resources
 * like open file descriptors and database connections.  If a service is built
 * so that requests are all supposed to take about the same amount of time (or
 * at least that there's a constant upper bound), then it may be possible to use
 * a simple timeout scheme to cancel requests that are taking too long, as might
 * happen if a malicious client finds some way to cause requests to hang or take
 * a very long time.
 *
 * [1]: https://cloud.google.com/apis/design/design_patterns#list_pagination
 * [2]: https://www.citusdata.com/blog/2016/03/30/five-ways-to-paginate/
 */

use crate::error::HttpError;
use crate::from_map::from_map;
use base64::URL_SAFE;
use schemars::JsonSchema;
use serde::de::DeserializeOwned;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use std::collections::BTreeMap;
use std::fmt::Debug;
use std::num::NonZeroU64;

/**
 * A page of results from a paginated API
 *
 * This structure is intended for use both on the server side (to generate the
 * results page) and on the client side (to parse it).
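 *
 * When serialized (e.g., as the JSON body of a response), a page looks
 * roughly like this (the token value is truncated here):
 *
 * ```text
 * {
 *   "next_page": "eyJ2IjoidjEi...",
 *   "items": [ ... ]
 * }
 * ```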
 */
#[derive(Debug, Deserialize, JsonSchema, Serialize)]
#[schemars(description = "A single page of results")]
pub struct ResultsPage<ItemType> {
    /** token used to fetch the next page of results (if any) */
    pub next_page: Option<String>,
    /** list of items on this page of results */
    pub items: Vec<ItemType>,
}

impl<ItemType> ResultsPage<ItemType> {
    /**
     * Construct a new results page from the list of `items`.  `page_selector`
     * is a function used to construct the page token that clients will provide
     * to fetch the next page of results.  `scan_params` is provided to the
     * `page_selector` function, since the token may depend on the type of scan.
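     *
     * A minimal sketch of a caller (the `Animal` item type, scan parameters,
     * and selector type here are hypothetical):
     *
     * ```ignore
     * #[derive(Serialize)]
     * struct AnimalPageSelector {
     *     name: String,
     * }
     *
     * let page = ResultsPage::new(
     *     animals,        // Vec<Animal> making up this page of results
     *     &scan_params,
     *     |a: &Animal, _scan| AnimalPageSelector { name: a.name.clone() },
     * )?;
     * ```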
     */
    pub fn new<F, ScanParams, PageSelector>(
        items: Vec<ItemType>,
        scan_params: &ScanParams,
        get_page_selector: F,
    ) -> Result<ResultsPage<ItemType>, HttpError>
    where
        F: Fn(&ItemType, &ScanParams) -> PageSelector,
        PageSelector: Serialize,
    {
        let next_page = items
            .last()
            .map(|last_item| {
                let selector = get_page_selector(last_item, scan_params);
                serialize_page_token(selector)
            })
            .transpose()?;

        Ok(ResultsPage {
            next_page,
            items,
        })
    }
}

/**
 * Querystring parameters provided by clients when scanning a paginated
 * collection
 *
 * To build an API endpoint that paginates results, you have your handler
 * function accept a `Query<PaginationParams<ScanParams, PageSelector>>` and
 * return a [`ResultsPage`].  You define your own `ScanParams` and
 * `PageSelector` types.
 *
 * `ScanParams` describes the set of querystring parameters that your endpoint
 * accepts for the _first_ request of the scan (typically: filters and sort
 * options).  This must be deserializable from a querystring.
 *
 * `PageSelector` describes the information your endpoint needs for requests
 * after the first one.  Typically this would include an id of some sort for the
 * last item on the previous page as well as any parameters related to filtering
 * or sorting so that your function can apply those, too.  The entire
 * `PageSelector` will be serialized to an opaque string and included in the
 * [`ResultsPage`].  The client is expected to provide this string as the
 * `"page_token"` querystring parameter in the subsequent request.
 * `PageSelector` must implement both [`Deserialize`] and [`Serialize`].
 * (Unlike `ScanParams`, `PageSelector` will not be deserialized directly from
 * the querystring.)
 *
 * There are several complete, documented examples in `dropshot/examples`.
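 *
 * As a rough sketch of such an endpoint (the `Project*` types and the handler
 * name here are hypothetical; see those examples for complete, working code):
 *
 * ```ignore
 * #[endpoint {
 *     method = GET,
 *     path = "/projects",
 * }]
 * async fn example_list_projects(
 *     rqctx: Arc<RequestContext>,
 *     query: Query<PaginationParams<ProjectScanParams, ProjectPageSelector>>,
 * ) -> Result<HttpResponseOk<ResultsPage<Project>>, HttpError> {
 *     // apply the ScanParams or PageSelector from `query`, fetch up to the
 *     // requested limit of items, then build a ResultsPage from them
 *     unimplemented!();
 * }
 * ```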
 *
 * **NOTE:** Your choices of `ScanParams` and `PageSelector` determine the
 * querystring parameters accepted by your endpoint and the structure of the
 * page token, respectively.  Both of these are part of your API's public
 * interface, though the page token won't appear in the OpenAPI spec.  Be
 * careful when designing these structures to consider what you might want to
 * support in the future.
 */
#[derive(Debug, Deserialize, JsonSchema)]
pub struct PaginationParams<ScanParams, PageSelector>
where
    ScanParams: DeserializeOwned,
    PageSelector: DeserializeOwned + Serialize,
{
    /**
     * Specifies whether this is the first request in a scan or a subsequent
     * request, as well as the parameters provided
     *
     * See [`WhichPage`] for details.  Note that this field is flattened by
     * serde, so you have to look at the variants of [`WhichPage`] to see what
     * query parameters are actually processed here.
     */
    #[serde(flatten, deserialize_with = "deserialize_whichpage")]
    pub page: WhichPage<ScanParams, PageSelector>,

    /**
     * Client-requested limit on page size (optional)
     *
     * Consumers should use
     * [`RequestContext`][crate::handler::RequestContext::page_limit()]
     * to access this value.
     */
    #[schemars(
        description = "Maximum number of items returned by a single call"
    )]
    pub(crate) limit: Option<NonZeroU64>,
}

/*
 * Deserialize `WhichPage` for `PaginationParams`.  In REST APIs, callers
 * typically provide either the parameters to resume a scan (in our case, just
 * "page_token") or the parameters to begin a new one (which can be
 * any set of parameters that our consumer wants).  There's generally no
 * separate field to indicate which case they're requesting. We deserialize into
 * a generic map first and then either interpret the page token or deserialize
 * the map into ScanParams.
 */
fn deserialize_whichpage<'de, D, ScanParams, PageSelector>(
    deserializer: D,
) -> Result<WhichPage<ScanParams, PageSelector>, D::Error>
where
    D: Deserializer<'de>,
    ScanParams: DeserializeOwned,
    PageSelector: DeserializeOwned,
{
    let raw_params = BTreeMap::<String, String>::deserialize(deserializer)?;

    match raw_params.get("page_token") {
        Some(page_token) => {
            let page_start = deserialize_page_token(&page_token)
                .map_err(serde::de::Error::custom)?;
            Ok(WhichPage::Next(page_start))
        }
        None => {
            let scan_params =
                from_map(&raw_params).map_err(serde::de::Error::custom)?;
            Ok(WhichPage::First(scan_params))
        }
    }
}

/**
 * Describes whether the client is beginning a new scan or resuming an existing
 * one
 *
 * In either case, this type provides access to consumer-defined parameters for
 * the particular type of request.  See [`PaginationParams`] for more
 * information.
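 *
 * A handler typically just matches on this value (sketch; what to do with
 * `scan` and `selector` is up to the consumer):
 *
 * ```ignore
 * match pag_params.page {
 *     WhichPage::First(scan) => {
 *         // begin a new scan using the consumer-defined parameters
 *     }
 *     WhichPage::Next(selector) => {
 *         // resume the scan from the last item the client reported seeing
 *     }
 * }
 * ```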
 */
#[derive(Debug)]
pub enum WhichPage<ScanParams, PageSelector> {
    /**
     * Indicates that the client is beginning a new scan
     *
     * `ScanParams` are the consumer-defined parameters for beginning a new scan
     * (e.g., filters, sort options, etc.)
     */
    First(ScanParams),

    /**
     * Indicates that the client is resuming a previous scan
     *
     * `PageSelector` are the consumer-defined parameters for resuming a
     * previous scan (e.g., any scan parameters, plus a marker to indicate the
     * last result seen by the client).
     */
    Next(PageSelector),
}

/*
 * Generate the JsonSchema for WhichPage from SchemaWhichPage.
 */
impl<ScanParams, PageSelector> JsonSchema
    for WhichPage<ScanParams, PageSelector>
where
    ScanParams: JsonSchema,
{
    fn schema_name() -> String {
        unimplemented!();
    }
    fn json_schema(
        gen: &mut schemars::gen::SchemaGenerator,
    ) -> schemars::schema::Schema {
        SchemaWhichPage::<ScanParams>::json_schema(gen)
    }
}

/**
 * `ScanParams` for use with `PaginationParams` when the API endpoint has no
 * scan parameters (i.e., it always iterates items in the collection in the same
 * way).
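 *
 * For example, such an endpoint might accept a
 * `Query<PaginationParams<EmptyScanParams, MyPageSelector>>`, where
 * `MyPageSelector` is a hypothetical consumer-defined marker type.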
 */
#[derive(Debug, Deserialize, JsonSchema)]
pub struct EmptyScanParams {}

/**
 * The order in which the client wants to page through the requested collection
 */
#[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum PaginationOrder {
    Ascending,
    Descending,
}

/*
 * Token and querystring serialization and deserialization
 *
 * Page tokens essentially take the consumer's PageSelector struct, add a
 * version number, serialize that as JSON, and base64-encode the result.  This
 * token is returned in any response from a paginated API, and the client will
 * pass it back as a query parameter for subsequent pagination requests. This
 * approach allows us to rev the serialized form if needed (see
 * `PaginationVersion`) and add other metadata in a backwards-compatible way.
 * It also emphasizes to clients that the token should be treated as opaque,
 * though it's obviously not resistant to tampering.
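 *
 * For example, a hypothetical page selector serialized as {"name":"moose"}
 * would be wrapped and encoded roughly as:
 *
 *     base64url( {"v":"v1","page_start":{"name":"moose"}} )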
 */

/**
 * Maximum length of a page token once the consumer-provided type is serialized
 * and the result is base64-encoded
 *
 * We impose a maximum length primarily to prevent a client from making us parse
 * extremely large strings.  We apply this limit when we create tokens to avoid
 * handing out a token that can't be used.
 *
 * Note that these tokens are passed in the HTTP request line (before the
 * headers), and many HTTP implementations impose a limit as low as 8KiB on the
 * size of the request line and headers together, so it's a good idea to keep
 * this as small as we can.
 */
const MAX_TOKEN_LENGTH: usize = 512;

/**
 * Version for the pagination token serialization format
 *
 * This may seem like overkill, but it allows us to rev this in a future version
 * of Dropshot without breaking any ongoing scans when the change is deployed.
 * If we rev this, we might need to provide a way for clients to request at
 * runtime which version of token to generate so that if they do a rolling
 * upgrade of multiple instances, they can configure the instances to generate
 * v1 tokens until the rollout is complete, then switch on the new token
 * version.  Obviously, it would be better to avoid revving this version if
 * possible!
 *
 * Note that consumers still need to consider compatibility if they change their
 * own `ScanParams` or `PageSelector` types.
 */
#[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
#[serde(rename_all = "lowercase")]
enum PaginationVersion {
    V1,
}

/**
 * Parts of the pagination token that actually get serialized
 */
#[derive(Debug, Deserialize, Serialize)]
struct SerializedToken<PageSelector> {
    v: PaginationVersion,
    page_start: PageSelector,
}

/**
 * Construct a serialized page token from a consumer's page selector
 */
fn serialize_page_token<PageSelector: Serialize>(
    page_start: PageSelector,
) -> Result<String, HttpError> {
    let token_bytes = {
        let serialized_token = SerializedToken {
            v: PaginationVersion::V1,
            page_start: page_start,
        };

        let json_bytes =
            serde_json::to_vec(&serialized_token).map_err(|e| {
                HttpError::for_internal_error(format!(
                    "failed to serialize token: {}",
                    e
                ))
            })?;

        base64::encode_config(json_bytes, URL_SAFE)
    };

    /*
     * TODO-robustness is there a way for us to know at compile-time that
     * this won't be a problem?  What if we say that PageSelector has to be
     * Sized?  That won't guarantee that this will work, but wouldn't that
     * mean that if it ever works, then it will always work?  But would that
     * interface be a pain to use, given that variable-length strings are
     * very common in the token?
     */
    if token_bytes.len() > MAX_TOKEN_LENGTH {
        return Err(HttpError::for_internal_error(format!(
            "serialized token is too large ({} bytes, max is {})",
            token_bytes.len(),
            MAX_TOKEN_LENGTH
        )));
    }

    Ok(token_bytes)
}

/**
 * Deserialize a token from the given string into the consumer's page selector
 * type
 */
fn deserialize_page_token<PageSelector: DeserializeOwned>(
    token_str: &str,
) -> Result<PageSelector, String> {
    if token_str.len() > MAX_TOKEN_LENGTH {
        return Err(String::from(
            "failed to parse pagination token: too large",
        ));
    }

    let json_bytes = base64::decode_config(token_str.as_bytes(), URL_SAFE)
        .map_err(|e| format!("failed to parse pagination token: {}", e))?;

    /*
     * TODO-debugging: we don't want the user to have to know about the
     * internal structure of the token, so the error message here doesn't
     * say anything about that.  However, it would be nice if we could
     * create an internal error message that included the serde_json error,
     * which would have more context for someone looking at the server logs
     * to figure out what happened with this request.  Our own `HttpError`
     * supports this, but it seems like serde only preserves the to_string()
     * output of the error anyway.  It's not clear how else we could
     * propagate this information out.
     */
    let deserialized: SerializedToken<PageSelector> =
        serde_json::from_slice(&json_bytes).map_err(|_| {
            format!("failed to parse pagination token: corrupted token")
        })?;

    if deserialized.v != PaginationVersion::V1 {
        return Err(format!(
            "failed to parse pagination token: unsupported version: {:?}",
            deserialized.v,
        ));
    }

    Ok(deserialized.page_start)
}

/*
 * This is the on-the-wire protocol; we use this solely to generate the schema.
 */
#[derive(JsonSchema)]
#[allow(dead_code)]
#[serde(untagged)]
enum SchemaWhichPage<ScanParams> {
    Next { page_token: String },
    First(ScanParams),
}

#[cfg(test)]
mod test {
    use super::deserialize_page_token;
    use super::serialize_page_token;
    use super::PaginationParams;
    use super::ResultsPage;
    use super::WhichPage;
    use serde::de::DeserializeOwned;
    use serde::Deserialize;
    use serde::Serialize;
    use std::{fmt::Debug, num::NonZeroU64};

    #[test]
    fn test_page_token_serialization() {
        #[derive(Deserialize, Serialize)]
        struct MyToken {
            x: u16,
        }

        #[derive(Debug, Deserialize, Serialize)]
        struct MyOtherToken {
            x: u8,
        }

        /*
         * The most basic functionality is that if we serialize something and
         * then deserialize the result of that, we get back the original thing.
         */
        let before = MyToken {
            x: 1025,
        };
        let serialized = serialize_page_token(&before).unwrap();
        let after: MyToken = deserialize_page_token(&serialized).unwrap();
        assert_eq!(after.x, 1025);

        /*
         * We should also sanity-check that if we try to deserialize it as the
         * wrong type, that will fail.
         */
        let error =
            deserialize_page_token::<MyOtherToken>(&serialized).unwrap_err();
        assert!(error.contains("corrupted token"));

        /*
         * Try serializing the maximum possible size.  (This was empirically
         * determined at the time of this writing.)
         */
        #[derive(Debug, Deserialize, Serialize)]
        struct TokenWithStr {
            s: String,
        }
        let input = TokenWithStr {
            s: String::from_utf8(vec![b'e'; 352]).unwrap(),
        };
        let serialized = serialize_page_token(&input).unwrap();
        assert_eq!(serialized.len(), super::MAX_TOKEN_LENGTH);
        let output: TokenWithStr = deserialize_page_token(&serialized).unwrap();
        assert_eq!(input.s, output.s);

        /*
         * Error cases make up the rest of this test.
         *
         * Start by attempting to serialize a token larger than the maximum
         * allowed size.
         */
        let input = TokenWithStr {
            s: String::from_utf8(vec![b'e'; 353]).unwrap(),
        };
        let error = serialize_page_token(&input).unwrap_err();
        assert_eq!(error.status_code, http::StatusCode::INTERNAL_SERVER_ERROR);
        assert_eq!(error.external_message, "Internal Server Error");
        assert!(error
            .internal_message
            .contains("serialized token is too large"));

        /* Non-base64 */
        let error =
            deserialize_page_token::<TokenWithStr>("not base 64").unwrap_err();
        assert!(error.contains("failed to parse"));

        /* Non-JSON */
        let error =
            deserialize_page_token::<TokenWithStr>(&base64::encode("{"))
                .unwrap_err();
        assert!(error.contains("corrupted token"));

        /* Wrong top-level JSON type */
        let error =
            deserialize_page_token::<TokenWithStr>(&base64::encode("[]"))
                .unwrap_err();
        assert!(error.contains("corrupted token"));

        /* Structure does not match our general Dropshot schema. */
        let error =
            deserialize_page_token::<TokenWithStr>(&base64::encode("{}"))
                .unwrap_err();
        assert!(error.contains("corrupted token"));

        /* Bad version */
        let error = deserialize_page_token::<TokenWithStr>(&base64::encode(
            "{\"v\":11}",
        ))
        .unwrap_err();
        assert!(error.contains("corrupted token"));
    }

    /*
     * It's worth testing parsing around PaginationParams and WhichPage because
     * it's a little non-trivial, owing to the custom deserializer (which goes
     * through a generic map of parameters), some optional fields, the untagged
     * enum used for the schema, etc.
     *
     * This is also the primary place where we test things like non-positive
     * values of "limit" being rejected, so even though the implementation in
     * our code is trivial, this functions more like an integration or system
     * test for those parameters.
     */
    #[test]
    fn test_pagparams_parsing() {
        #[derive(Debug, Deserialize, Serialize)]
        struct MyScanParams {
            the_field: String,
            only_good: Option<String>,
            how_many: u32,
            really: bool,
        }

        #[derive(Debug, Deserialize)]
        struct MyOptionalScanParams {
            the_field: Option<String>,
            only_good: Option<String>,
            how_many: Option<i32>,
            for_reals: Option<bool>,
        }

        #[derive(Debug, Serialize, Deserialize)]
        struct MyPageSelector {
            the_page: u8,
        }

        /*
         * "First page" cases
         */

        fn parse_as_first_page<T: DeserializeOwned + Debug>(
            querystring: &str,
        ) -> (T, Option<NonZeroU64>) {
            let pagparams: PaginationParams<T, MyPageSelector> =
                serde_urlencoded::from_str(querystring).unwrap();
            let limit = pagparams.limit;
            let scan_params = match pagparams.page {
                WhichPage::Next(..) => panic!("expected first page"),
                WhichPage::First(x) => x,
            };
            (scan_params, limit)
        }

        /* basic case: optional field specified, limit unspecified */
        let (scan, limit) = parse_as_first_page::<MyScanParams>(
            "the_field=name&only_good=true&how_many=42&really=false",
        );
        assert_eq!(scan.the_field, "name".to_string());
        assert_eq!(scan.only_good, Some("true".to_string()));
        assert_eq!(scan.how_many, 42);
        assert_eq!(scan.really, false);
        assert_eq!(limit, None);

        /* optional field specified with value "false", limit unspecified */
        let (scan, limit) = parse_as_first_page::<MyScanParams>(
            "the_field=&only_good=false&how_many=42&really=false",
        );
        assert_eq!(scan.the_field, "".to_string());
        assert_eq!(scan.only_good, Some("false".to_string()));
        assert_eq!(scan.how_many, 42);
        assert_eq!(scan.really, false);
        assert_eq!(limit, None);

        /* optional field unspecified, limit is valid */
        let (scan, limit) = parse_as_first_page::<MyScanParams>(
            "the_field=name&limit=3&how_many=42&really=false",
        );
        assert_eq!(scan.the_field, "name".to_string());
        assert_eq!(scan.only_good, None);
        assert_eq!(scan.how_many, 42);
        assert_eq!(scan.really, false);
        assert_eq!(limit.unwrap().get(), 3);

        /* empty query string when all parameters are optional */
        let (scan, limit) = parse_as_first_page::<MyOptionalScanParams>("");
        assert_eq!(scan.the_field, None);
        assert_eq!(scan.only_good, None);
        assert_eq!(limit, None);

        /* extra parameters are fine */
        let (scan, limit) = parse_as_first_page::<MyOptionalScanParams>(
            "the_field=name&limit=17&boomtown=okc&how_many=42",
        );
        assert_eq!(scan.the_field, Some("name".to_string()));
        assert_eq!(scan.only_good, None);
        assert_eq!(scan.how_many, Some(42));
        assert_eq!(limit.unwrap().get(), 17);

        /*
         * Error cases, including errors parsing first page parameters.
         *
         * TODO-polish The actual error messages for the following cases are
         * pretty poor, so we don't test them here, but we should clean these
         * up.
         */
        fn parse_as_error(querystring: &str) -> serde_urlencoded::de::Error {
            serde_urlencoded::from_str::<
                PaginationParams<MyScanParams, MyPageSelector>,
            >(querystring)
            .unwrap_err()
        }

        /* missing required field ("the_field") */
        parse_as_error("");
        /* invalid limit (number out of range) */
        parse_as_error("the_field=name&limit=0");
        parse_as_error("the_field=name&limit=-3");
        /* invalid limit (not a number) */
        parse_as_error("the_field=name&limit=abcd");
        /*
         * Invalid page token (bad base64 length)
         * Other test cases for deserializing tokens are tested elsewhere.
         */
        parse_as_error("page_token=q");

        /*
         * "Next page" cases
         */

        fn parse_as_next_page(
            querystring: &str,
        ) -> (MyPageSelector, Option<NonZeroU64>) {
            let pagparams: PaginationParams<MyScanParams, MyPageSelector> =
                serde_urlencoded::from_str(querystring).unwrap();
            let limit = pagparams.limit;
            let page_selector = match pagparams.page {
                WhichPage::Next(x) => x,
                WhichPage::First(_) => panic!("expected next page"),
            };
            (page_selector, limit)
        }

        /* basic case */
        let token = serialize_page_token(&MyPageSelector {
            the_page: 123,
        })
        .unwrap();
        let (page_selector, limit) =
            parse_as_next_page(&format!("page_token={}", token));
        assert_eq!(page_selector.the_page, 123);
        assert_eq!(limit, None);

        /* limit is also accepted */
        let (page_selector, limit) =
            parse_as_next_page(&format!("page_token={}&limit=12", token));
        assert_eq!(page_selector.the_page, 123);
        assert_eq!(limit.unwrap().get(), 12);

        /*
         * Having parameters appropriate to the scan params doesn't change the
         * way this is interpreted.
         */
        let (page_selector, limit) = parse_as_next_page(&format!(
            "the_field=name&page_token={}&limit=3",
            token
        ));
        assert_eq!(page_selector.the_page, 123);
        assert_eq!(limit.unwrap().get(), 3);

        /* invalid limits (same as above) */
        parse_as_error(&format!("page_token={}&limit=0", token));
        parse_as_error(&format!("page_token={}&limit=-3", token));

        /*
         * We ought not to promise much about what happens if the user's
         * ScanParams has a "page_token" field.  In practice, ours always takes
         * precedence (and it's not clear how else this could work).
         */
        #[derive(Debug, Deserialize)]
        struct SketchyScanParams {
            page_token: String,
        }

        let pagparams: PaginationParams<SketchyScanParams, MyPageSelector> =
            serde_urlencoded::from_str(&format!("page_token={}", token))
                .unwrap();
        assert_eq!(pagparams.limit, None);
        match &pagparams.page {
            WhichPage::First(..) => {
                panic!("expected NextPage even with page_token in ScanParams")
            }
            WhichPage::Next(p) => {
                assert_eq!(p.the_page, 123);
            }
        }
    }

    #[test]
    fn test_results_page() {
        /*
         * It would be a neat paginated Fibonacci API if the page selector
         * were just the last two numbers!  Dropshot doesn't support that and
         * it's not clear that's a practical use case anyway.
         */
        let items = vec![1, 1, 2, 3, 5, 8, 13];
        let dummy_scan_params = 21;
        #[derive(Debug, Deserialize, Serialize)]
        struct FibPageSelector {
            prev: usize,
        }
        let get_page = |item: &usize, scan_params: &usize| FibPageSelector {
            prev: *item + *scan_params,
        };

        let results =
            ResultsPage::new(items.clone(), &dummy_scan_params, get_page)
                .unwrap();
        assert_eq!(results.items, items);
        assert!(results.next_page.is_some());
        let token = results.next_page.unwrap();
        let deserialized: FibPageSelector =
            deserialize_page_token(&token).unwrap();
        assert_eq!(deserialized.prev, 34);

        let results =
            ResultsPage::new(Vec::new(), &dummy_scan_params, get_page).unwrap();
        assert_eq!(results.items.len(), 0);
        assert!(results.next_page.is_none());
    }
}