Compare commits
No commits in common. "pagination" and "feature_35_feed_database" have entirely different histories.
pagination
...
feature_35
|
@ -1,6 +0,0 @@
|
|||
Nogo:
|
||||
Voluntarily skip database migrations for the demo project
|
||||
|
||||
|
||||
Caveats:
|
||||
Issue in database / session management for tests, out of scope to understand now
|
5
Makefile
5
Makefile
|
@ -16,14 +16,15 @@ run_dev:
|
|||
git ls-files | entr -r pipenv run python dev.py
|
||||
|
||||
tdd:
|
||||
git ls-files | entr make test functionnal_tests opt='$(opt)'
|
||||
git ls-files | entr make test opt='$(opt)'
|
||||
git ls-files | entr make functionnal_tests
|
||||
|
||||
refactor_tdd:
|
||||
make tdd opt="--pdb --ff --lf --ff -x"
|
||||
|
||||
|
||||
watch_db:
|
||||
watch "sqlite3 db_$(target).sqlite3 'select count(*) from movies'"
|
||||
watch "sqlite3 sql_app.db 'select count(*) from movies'"
|
||||
|
||||
|
||||
test:
|
||||
|
|
9
crud.py
9
crud.py
|
@ -37,14 +37,9 @@ def get_movie_by_name(db: Session, name: str = ""):
|
|||
return db_movie.all()
|
||||
|
||||
|
||||
def get_all_movies(db: Session, offset: int | None = None, limit: int | None = None):
|
||||
def get_all_movies(db: Session):
|
||||
db_movie = db.query(models.Movie)
|
||||
if offset is not None:
|
||||
db_movie = db_movie.offset(offset)
|
||||
if limit is not None:
|
||||
db_movie = db_movie.limit(limit)
|
||||
|
||||
return db_movie
|
||||
return db_movie.all()
|
||||
|
||||
|
||||
def get_movie_by_id(db: Session, id_: str = ""):
|
||||
|
|
17
database.py
17
database.py
|
@ -38,25 +38,14 @@ def adapt_movie_data(data_in: dict):
|
|||
|
||||
|
||||
def fill_db(
|
||||
db=SessionLocal(),
|
||||
movie_input_file: str = "input_data/movies_metadata.csv",
|
||||
sample_rate=100,
|
||||
db=SessionLocal(), movie_input_file: str = "input_data/movies_metadata.csv"
|
||||
):
|
||||
import crud
|
||||
import csv
|
||||
import random
|
||||
|
||||
page_size = 1_00
|
||||
|
||||
def compute_rate(*dummy):
|
||||
if 0 < sample_rate < 100:
|
||||
return random.random() < (sample_rate / 100)
|
||||
return True
|
||||
|
||||
with open(movie_input_file) as csvfile:
|
||||
for count, movie_data in enumerate(
|
||||
filter(compute_rate, csv.DictReader(csvfile)), start=1
|
||||
):
|
||||
for count, movie_data in enumerate(csv.DictReader(csvfile), start=1):
|
||||
if count % page_size == 0:
|
||||
db.commit()
|
||||
|
||||
|
@ -73,4 +62,4 @@ def fill_db(
|
|||
|
||||
if __name__ == "__main__":
|
||||
create_db()
|
||||
fill_db(sample_rate=1)
|
||||
fill_db()
|
||||
|
|
36
dev.py
36
dev.py
|
@ -114,40 +114,8 @@ async def delete_movie(id_: str, db: Session = Depends(get_db)) -> None:
|
|||
|
||||
|
||||
@app.get("/movies/")
|
||||
async def list_movie(
|
||||
db: Session = Depends(get_db),
|
||||
pagenum: int | None = None,
|
||||
pagesize: int | None = None,
|
||||
) -> schemas.PaginatedMovies | schemas.MovieObjectsOut:
|
||||
paginate_params = {}
|
||||
paginate_data = {}
|
||||
|
||||
pagination_params = {"pagenum": pagenum, "pagesize": pagesize}
|
||||
if any(v for v in pagination_params.values() if v is not None):
|
||||
missing = [name for (name, value) in pagination_params.items() if not value]
|
||||
if missing:
|
||||
raise HTTPException(status_code=404, detail=f"Missing {missing}")
|
||||
|
||||
# Here we do a "x + 1 - 1 = x" trick to check if there will be more pages
|
||||
# eg we want from 10 to 15, we ask for 10 to 16, if we have *stricly* more
|
||||
# than 5 element we can now that there will be one more page
|
||||
paginate_params = dict(offset=(pagenum - 1) * pagesize, limit=pagesize + 1)
|
||||
|
||||
movies = crud.get_all_movies(db, **paginate_params)
|
||||
|
||||
if paginate_params:
|
||||
has_more_content = movies.count() > pagesize
|
||||
paginate_data = {
|
||||
"next_page": pagenum + 1 if has_more_content else None,
|
||||
"previous_page": pagenum - 1 if pagenum > 1 else None,
|
||||
}
|
||||
|
||||
movies = movies.limit(pagesize)
|
||||
|
||||
count = movies.count()
|
||||
|
||||
payload = {"movies": movies, "count": count}
|
||||
return {**payload, **paginate_data}
|
||||
async def list_movie(db: Session = Depends(get_db)) -> list[schemas.MovieObject]:
|
||||
return crud.get_all_movies(db)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
14
schemas.py
14
schemas.py
|
@ -14,17 +14,3 @@ class MoviePayload(BaseModel):
|
|||
|
||||
class MovieObject(MoviePayload):
|
||||
id: int | str
|
||||
|
||||
|
||||
class Paginated(BaseModel):
|
||||
next_page: str | int | None
|
||||
previous_page: str | int | None
|
||||
|
||||
|
||||
class MovieObjectsOut(BaseModel):
|
||||
movies: list[MovieObject]
|
||||
count: int
|
||||
|
||||
|
||||
class PaginatedMovies(MovieObjectsOut, Paginated):
|
||||
pass
|
||||
|
|
|
@ -112,7 +112,6 @@ class BaseCrud(unittest.TestCase):
|
|||
def test_list_movies(self):
|
||||
response = client.get("/movies/")
|
||||
assert response.status_code == 200
|
||||
primary_count = response.json()["count"]
|
||||
# assert response.json() == []
|
||||
|
||||
N = 10
|
||||
|
@ -125,162 +124,11 @@ class BaseCrud(unittest.TestCase):
|
|||
response = client.post("/movies/", json=self.create_payload)
|
||||
assert response.status_code == 200
|
||||
|
||||
response = client.get("/movies/").json()
|
||||
|
||||
movies = response["movies"]
|
||||
count = response["count"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
movies = client.get("/movies/")
|
||||
movies_by_title = {m["title"]: m for m in movies.json()}
|
||||
found = list(movies_by_title[title] for title in names)
|
||||
assert all(movies_by_title[title] for title in names)
|
||||
|
||||
assert count == primary_count + N
|
||||
|
||||
def test_list_movies_payload_format(self):
|
||||
response = client.get("/movies/")
|
||||
assert response.status_code == 200
|
||||
# assert response.json() == []
|
||||
primary_count = response.json()["count"]
|
||||
|
||||
N = 10
|
||||
names = []
|
||||
for _ in range(N):
|
||||
name = rand_name()
|
||||
|
||||
names.append(name)
|
||||
self.create_payload["title"] = name
|
||||
response = client.post("/movies/", json=self.create_payload)
|
||||
assert response.status_code == 200
|
||||
|
||||
movies = client.get("/movies/").json()
|
||||
|
||||
assert isinstance(movies["count"], int)
|
||||
assert isinstance(movies["movies"], list)
|
||||
assert movies["count"] == primary_count + N
|
||||
|
||||
def test_list_pagination_limits(self):
|
||||
response = client.get("/movies/")
|
||||
nb_movies = response.json()["count"]
|
||||
|
||||
for _ in range(3):
|
||||
self.create_payload["title"] = rand_name()
|
||||
response = client.post("/movies/", json=self.create_payload)
|
||||
|
||||
response = client.get("/movies/")
|
||||
nb_movies = response.json()["count"]
|
||||
|
||||
pagenum = 1
|
||||
pagesize = nb_movies - 1
|
||||
|
||||
# Test page 1 has no previous ?
|
||||
current_movies = client.get(
|
||||
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
|
||||
).json()
|
||||
assert current_movies.get("previous_page") is None
|
||||
assert current_movies["next_page"]
|
||||
|
||||
current_movies = client.get(
|
||||
f"/movies/?pagenum={pagenum + 1 }&pagesize={pagesize}"
|
||||
).json()
|
||||
assert current_movies.get("next_page") is None
|
||||
assert current_movies["previous_page"]
|
||||
|
||||
# test last page has no next
|
||||
|
||||
def test_list_movies_pagination_back_forth(self):
|
||||
response = client.get("/movies/")
|
||||
nb_movies = response.json()["count"]
|
||||
|
||||
for _ in range(3):
|
||||
self.create_payload["title"] = rand_name()
|
||||
response = client.post("/movies/", json=self.create_payload)
|
||||
|
||||
response = client.get("/movies/")
|
||||
nb_movies = response.json()["count"]
|
||||
|
||||
pagenum = 1
|
||||
pagesize = 2
|
||||
|
||||
first, *_, last = client.get("/movies/").json()["movies"]
|
||||
|
||||
while current_movies := client.get(
|
||||
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
|
||||
).json():
|
||||
next_page_num = current_movies.get("next_page")
|
||||
assert next_page_num != pagenum
|
||||
|
||||
if next_page_num is None:
|
||||
assert current_movies["movies"][-1] == last
|
||||
break
|
||||
else:
|
||||
assert next_page_num == pagenum + 1
|
||||
pagenum = next_page_num
|
||||
|
||||
def test_list_movies_pagination(self):
|
||||
response = client.get("/movies/")
|
||||
assert response.status_code == 200
|
||||
# assert response.json() == []
|
||||
primary_count = response.json()["count"]
|
||||
|
||||
N = 10
|
||||
names = []
|
||||
for _ in range(N):
|
||||
name = rand_name()
|
||||
|
||||
names.append(name)
|
||||
self.create_payload["title"] = name
|
||||
response = client.post("/movies/", json=self.create_payload)
|
||||
assert response.status_code == 200
|
||||
|
||||
pagenum = 3
|
||||
pagesize = 5
|
||||
|
||||
sliced_movies = client.get("/movies/").json()["movies"][
|
||||
(pagenum - 1) * pagesize : pagenum * pagesize
|
||||
]
|
||||
|
||||
sliced_titles = [m["title"] for m in sliced_movies]
|
||||
|
||||
movies_paginate = client.get(
|
||||
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
|
||||
).json()["movies"]
|
||||
|
||||
paginate_titles = [m["title"] for m in movies_paginate]
|
||||
|
||||
assert sliced_titles == paginate_titles
|
||||
|
||||
def test_list_movies_pagination(self):
|
||||
response = client.get("/movies/")
|
||||
assert response.status_code == 200
|
||||
# assert response.json() == []
|
||||
primary_count = response.json()["count"]
|
||||
|
||||
N = 10
|
||||
names = []
|
||||
for _ in range(N):
|
||||
name = rand_name()
|
||||
|
||||
names.append(name)
|
||||
self.create_payload["title"] = name
|
||||
response = client.post("/movies/", json=self.create_payload)
|
||||
assert response.status_code == 200
|
||||
|
||||
pagenum = 3
|
||||
pagesize = 5
|
||||
|
||||
sliced_movies = client.get("/movies/").json()["movies"][
|
||||
(pagenum - 1) * pagesize : pagenum * pagesize
|
||||
]
|
||||
|
||||
sliced_titles = [m["title"] for m in sliced_movies]
|
||||
|
||||
movies_paginate = client.get(
|
||||
f"/movies/?pagenum={pagenum}&pagesize={pagesize}"
|
||||
).json()["movies"]
|
||||
|
||||
paginate_titles = [m["title"] for m in movies_paginate]
|
||||
|
||||
assert sliced_titles == paginate_titles
|
||||
|
||||
|
||||
class ApiTestCase(unittest.TestCase):
|
||||
def test_payload_content_in_and_out_loopback(self):
|
||||
|
|
|
@ -117,38 +117,8 @@ def test_list_movies():
|
|||
names.append(name)
|
||||
crud.create_movie(db, title=name, genres=["Animated", "Paropaganda"])
|
||||
|
||||
movies = client.get("movies").json()["movies"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
assert all(movies_by_title[name] for name in names)
|
||||
|
||||
|
||||
def test_search_movies():
|
||||
clear_db()
|
||||
response = client.get("/movies/")
|
||||
# assert response.json() == []
|
||||
|
||||
radix = rand_name()
|
||||
|
||||
name = radix + "test_search"
|
||||
|
||||
desc = radix + "test_desription"
|
||||
|
||||
with db_context() as db:
|
||||
movie_name = crud.create_movie(
|
||||
db, title=name, genres=["Animated", "Paropaganda"]
|
||||
)
|
||||
movie_desc = crud.create_movie(
|
||||
db, title=radix, description=desc, genres=["Animated", "Paropaganda"]
|
||||
)
|
||||
|
||||
for term, target in zip((name, desc), (movie_name, movie_desc)):
|
||||
with db_context() as db:
|
||||
found = crud.search_movie(db, desc).all()
|
||||
assert len(found) == 1
|
||||
assert target == found[0]
|
||||
|
||||
movies = client.get("movies").json()["movies"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
movies = client.get("movies")
|
||||
movies_by_title = {m["title"]: m for m in movies.json()}
|
||||
assert all(movies_by_title[name] for name in names)
|
||||
|
||||
|
||||
|
@ -158,16 +128,16 @@ def test_sample_import_toy_story():
|
|||
movie_title = "Toy Story"
|
||||
file_path = "input_data/movies_metadata_short.csv"
|
||||
|
||||
movies = client.get("movies").json()["movies"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
movies = client.get("movies")
|
||||
movies_by_title = {m["title"]: m for m in movies.json()}
|
||||
|
||||
assert movie_title not in movies_by_title, "The movie should not be pre existing"
|
||||
|
||||
with db_context() as db:
|
||||
fill_db(db, file_path)
|
||||
|
||||
movies = client.get("movies").json()["movies"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
movies = client.get("movies")
|
||||
movies_by_title = {m["title"]: m for m in movies.json()}
|
||||
|
||||
toy_story = movies_by_title["Toy Story"]
|
||||
|
||||
|
@ -186,12 +156,12 @@ def test_title_is_taken_form_original_title_is_missing():
|
|||
|
||||
file_path = "utests/movie_error_missing_title.csv"
|
||||
file_path = "input_data/movies_metadata.csv"
|
||||
movies = client.get("movies").json()["movies"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
movies = client.get("movies")
|
||||
movies_by_title = {m["title"]: m for m in movies.json()}
|
||||
|
||||
assert movie_title not in movies_by_title, "The movie should not be pre existing"
|
||||
with db_context() as db:
|
||||
fill_db(db, file_path, sample_rate=1)
|
||||
fill_db(db, file_path)
|
||||
|
||||
movies = client.get("movies").json()["movies"]
|
||||
movies_by_title = {m["title"]: m for m in movies}
|
||||
movies = client.get("movies")
|
||||
movies_by_title = {m["title"]: m for m in movies.json()}
|
||||
|
|
Loading…
Reference in New Issue