Compare commits

..

17 Commits

Author SHA1 Message Date
Colin Goutte 6ea473d97a Start both unit and functionnal test? 2023-09-07 15:35:20 +02:00
Colin Goutte 3897771ecb Pagination works from begining to end 2023-08-28 15:58:42 +02:00
Colin Goutte 8ef89ecc85 GREEN: Had to fiddle a bit with schemas and typing 2023-08-28 15:58:42 +02:00
Colin Goutte a7975cf164 First and last pages pagination test 2023-08-28 15:58:42 +02:00
Colin Goutte f813dcc49f WIP 2023-08-28 15:58:42 +02:00
Colin Goutte 32c97664c6 First shot of paginaiton seems ok 2023-08-28 15:58:42 +02:00
Colin Goutte 2a4ac4961c RED: write test to query paginated view 2023-08-28 15:58:42 +02:00
Colin Goutte 99045993fe First shot of pagination, try not to brake anything 2023-08-28 15:58:42 +02:00
Colin Goutte 1565863b56 Test to search for a movie on crud 2023-08-27 21:04:52 +02:00
Colin Goutte d25a8c66e1 Merge branch 'feature_36_list_endpoint' 2023-08-27 20:55:22 +02:00
Colin Goutte 0c1bdba85f Add a test for count 2023-08-27 18:22:29 +02:00
Colin Goutte daec71fe70 Green: adapt output schema for 'movie' and 'count' 2023-08-27 18:17:17 +02:00
Colin Goutte 4acf79a0dd RED: Check expected output format for '/movies/' route 2023-08-27 17:59:05 +02:00
Colin Goutte 56844ff18d Add caveat notes 2023-08-27 17:53:48 +02:00
Colin Goutte a6da3c0ac7 Fix database script 2023-08-27 16:19:33 +02:00
Colin Goutte 94ad5b28e5 Sample input for the whole test 2023-08-27 16:16:36 +02:00
Colin Goutte c3434ec1f7 Merge branch 'feature_35_feed_database' 2023-08-27 16:05:59 +02:00
8 changed files with 272 additions and 23 deletions

6
CAVEATS_NOGOS Normal file
View File

@ -0,0 +1,6 @@
Nogo:
Voluntarily skip database migrations for the demo project
Caveats:
Issue in database / session management for tests; out of scope to understand for now

View File

@ -16,15 +16,14 @@ run_dev:
git ls-files | entr -r pipenv run python dev.py
tdd:
git ls-files | entr make test opt='$(opt)'
git ls-files | entr make functionnal_tests
git ls-files | entr make test functionnal_tests opt='$(opt)'
refactor_tdd:
make tdd opt="--pdb --ff --lf --ff -x"
watch_db:
watch "sqlite3 sql_app.db 'select count(*) from movies'"
watch "sqlite3 db_$(target).sqlite3 'select count(*) from movies'"
test:

View File

@ -37,9 +37,14 @@ def get_movie_by_name(db: Session, name: str = ""):
return db_movie.all()
def get_all_movies(db: Session):
def get_all_movies(db: Session, offset: int | None = None, limit: int | None = None):
db_movie = db.query(models.Movie)
return db_movie.all()
if offset is not None:
db_movie = db_movie.offset(offset)
if limit is not None:
db_movie = db_movie.limit(limit)
return db_movie
def get_movie_by_id(db: Session, id_: str = ""):

View File

@ -38,14 +38,25 @@ def adapt_movie_data(data_in: dict):
def fill_db(
db=SessionLocal(), movie_input_file: str = "input_data/movies_metadata.csv"
db=SessionLocal(),
movie_input_file: str = "input_data/movies_metadata.csv",
sample_rate=100,
):
import crud
import csv
import random
page_size = 1_00
def compute_rate(*dummy):
if 0 < sample_rate < 100:
return random.random() < (sample_rate / 100)
return True
with open(movie_input_file) as csvfile:
for count, movie_data in enumerate(csv.DictReader(csvfile), start=1):
for count, movie_data in enumerate(
filter(compute_rate, csv.DictReader(csvfile)), start=1
):
if count % page_size == 0:
db.commit()
@ -62,4 +73,4 @@ def fill_db(
if __name__ == "__main__":
create_db()
fill_db()
fill_db(sample_rate=1)

36
dev.py
View File

@ -114,8 +114,40 @@ async def delete_movie(id_: str, db: Session = Depends(get_db)) -> None:
@app.get("/movies/")
async def list_movie(db: Session = Depends(get_db)) -> list[schemas.MovieObject]:
return crud.get_all_movies(db)
async def list_movie(
db: Session = Depends(get_db),
pagenum: int | None = None,
pagesize: int | None = None,
) -> schemas.PaginatedMovies | schemas.MovieObjectsOut:
paginate_params = {}
paginate_data = {}
pagination_params = {"pagenum": pagenum, "pagesize": pagesize}
if any(v for v in pagination_params.values() if v is not None):
missing = [name for (name, value) in pagination_params.items() if not value]
if missing:
raise HTTPException(status_code=404, detail=f"Missing {missing}")
# Here we do a "x + 1 - 1 = x" trick to check if there will be more pages
# e.g. we want items 10 to 15, so we ask for 10 to 16; if we get *strictly* more
# than 5 elements we know that there will be at least one more page
paginate_params = dict(offset=(pagenum - 1) * pagesize, limit=pagesize + 1)
movies = crud.get_all_movies(db, **paginate_params)
if paginate_params:
has_more_content = movies.count() > pagesize
paginate_data = {
"next_page": pagenum + 1 if has_more_content else None,
"previous_page": pagenum - 1 if pagenum > 1 else None,
}
movies = movies.limit(pagesize)
count = movies.count()
payload = {"movies": movies, "count": count}
return {**payload, **paginate_data}
if __name__ == "__main__":

View File

@ -14,3 +14,17 @@ class MoviePayload(BaseModel):
class MovieObject(MoviePayload):
id: int | str
class Paginated(BaseModel):
next_page: str | int | None
previous_page: str | int | None
class MovieObjectsOut(BaseModel):
movies: list[MovieObject]
count: int
class PaginatedMovies(MovieObjectsOut, Paginated):
pass

View File

@ -112,6 +112,7 @@ class BaseCrud(unittest.TestCase):
def test_list_movies(self):
response = client.get("/movies/")
assert response.status_code == 200
primary_count = response.json()["count"]
# assert response.json() == []
N = 10
@ -124,11 +125,162 @@ class BaseCrud(unittest.TestCase):
response = client.post("/movies/", json=self.create_payload)
assert response.status_code == 200
movies = client.get("/movies/")
movies_by_title = {m["title"]: m for m in movies.json()}
response = client.get("/movies/").json()
movies = response["movies"]
count = response["count"]
movies_by_title = {m["title"]: m for m in movies}
found = list(movies_by_title[title] for title in names)
assert all(movies_by_title[title] for title in names)
assert count == primary_count + N
def test_list_movies_payload_format(self):
response = client.get("/movies/")
assert response.status_code == 200
# assert response.json() == []
primary_count = response.json()["count"]
N = 10
names = []
for _ in range(N):
name = rand_name()
names.append(name)
self.create_payload["title"] = name
response = client.post("/movies/", json=self.create_payload)
assert response.status_code == 200
movies = client.get("/movies/").json()
assert isinstance(movies["count"], int)
assert isinstance(movies["movies"], list)
assert movies["count"] == primary_count + N
def test_list_pagination_limits(self):
response = client.get("/movies/")
nb_movies = response.json()["count"]
for _ in range(3):
self.create_payload["title"] = rand_name()
response = client.post("/movies/", json=self.create_payload)
response = client.get("/movies/")
nb_movies = response.json()["count"]
pagenum = 1
pagesize = nb_movies - 1
# Test page 1 has no previous ?
current_movies = client.get(
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
).json()
assert current_movies.get("previous_page") is None
assert current_movies["next_page"]
current_movies = client.get(
f"/movies/?pagenum={pagenum + 1 }&pagesize={pagesize}"
).json()
assert current_movies.get("next_page") is None
assert current_movies["previous_page"]
# test last page has no next
def test_list_movies_pagination_back_forth(self):
response = client.get("/movies/")
nb_movies = response.json()["count"]
for _ in range(3):
self.create_payload["title"] = rand_name()
response = client.post("/movies/", json=self.create_payload)
response = client.get("/movies/")
nb_movies = response.json()["count"]
pagenum = 1
pagesize = 2
first, *_, last = client.get("/movies/").json()["movies"]
while current_movies := client.get(
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
).json():
next_page_num = current_movies.get("next_page")
assert next_page_num != pagenum
if next_page_num is None:
assert current_movies["movies"][-1] == last
break
else:
assert next_page_num == pagenum + 1
pagenum = next_page_num
def test_list_movies_pagination(self):
response = client.get("/movies/")
assert response.status_code == 200
# assert response.json() == []
primary_count = response.json()["count"]
N = 10
names = []
for _ in range(N):
name = rand_name()
names.append(name)
self.create_payload["title"] = name
response = client.post("/movies/", json=self.create_payload)
assert response.status_code == 200
pagenum = 3
pagesize = 5
sliced_movies = client.get("/movies/").json()["movies"][
(pagenum - 1) * pagesize : pagenum * pagesize
]
sliced_titles = [m["title"] for m in sliced_movies]
movies_paginate = client.get(
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
).json()["movies"]
paginate_titles = [m["title"] for m in movies_paginate]
assert sliced_titles == paginate_titles
def test_list_movies_pagination(self):
response = client.get("/movies/")
assert response.status_code == 200
# assert response.json() == []
primary_count = response.json()["count"]
N = 10
names = []
for _ in range(N):
name = rand_name()
names.append(name)
self.create_payload["title"] = name
response = client.post("/movies/", json=self.create_payload)
assert response.status_code == 200
pagenum = 3
pagesize = 5
sliced_movies = client.get("/movies/").json()["movies"][
(pagenum - 1) * pagesize : pagenum * pagesize
]
sliced_titles = [m["title"] for m in sliced_movies]
movies_paginate = client.get(
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
).json()["movies"]
paginate_titles = [m["title"] for m in movies_paginate]
assert sliced_titles == paginate_titles
class ApiTestCase(unittest.TestCase):
def test_payload_content_in_and_out_loopback(self):

View File

@ -117,8 +117,38 @@ def test_list_movies():
names.append(name)
crud.create_movie(db, title=name, genres=["Animated", "Paropaganda"])
movies = client.get("movies")
movies_by_title = {m["title"]: m for m in movies.json()}
movies = client.get("movies").json()["movies"]
movies_by_title = {m["title"]: m for m in movies}
assert all(movies_by_title[name] for name in names)
def test_search_movies():
clear_db()
response = client.get("/movies/")
# assert response.json() == []
radix = rand_name()
name = radix + "test_search"
desc = radix + "test_desription"
with db_context() as db:
movie_name = crud.create_movie(
db, title=name, genres=["Animated", "Paropaganda"]
)
movie_desc = crud.create_movie(
db, title=radix, description=desc, genres=["Animated", "Paropaganda"]
)
for term, target in zip((name, desc), (movie_name, movie_desc)):
with db_context() as db:
found = crud.search_movie(db, desc).all()
assert len(found) == 1
assert target == found[0]
movies = client.get("movies").json()["movies"]
movies_by_title = {m["title"]: m for m in movies}
assert all(movies_by_title[name] for name in names)
@ -128,16 +158,16 @@ def test_sample_import_toy_story():
movie_title = "Toy Story"
file_path = "input_data/movies_metadata_short.csv"
movies = client.get("movies")
movies_by_title = {m["title"]: m for m in movies.json()}
movies = client.get("movies").json()["movies"]
movies_by_title = {m["title"]: m for m in movies}
assert movie_title not in movies_by_title, "The movie should not be pre existing"
with db_context() as db:
fill_db(db, file_path)
movies = client.get("movies")
movies_by_title = {m["title"]: m for m in movies.json()}
movies = client.get("movies").json()["movies"]
movies_by_title = {m["title"]: m for m in movies}
toy_story = movies_by_title["Toy Story"]
@ -156,12 +186,12 @@ def test_title_is_taken_form_original_title_is_missing():
file_path = "utests/movie_error_missing_title.csv"
file_path = "input_data/movies_metadata.csv"
movies = client.get("movies")
movies_by_title = {m["title"]: m for m in movies.json()}
movies = client.get("movies").json()["movies"]
movies_by_title = {m["title"]: m for m in movies}
assert movie_title not in movies_by_title, "The movie should not be pre existing"
with db_context() as db:
fill_db(db, file_path)
fill_db(db, file_path, sample_rate=1)
movies = client.get("movies")
movies_by_title = {m["title"]: m for m in movies.json()}
movies = client.get("movies").json()["movies"]
movies_by_title = {m["title"]: m for m in movies}