consumocuidado-server/products/views.py

import csv
import datetime
import logging
import operator
from functools import reduce

import requests
from django.conf import settings
from django.core import serializers
from django.db.models import Q
from django.shortcuts import render
from rest_framework import status, viewsets
from rest_framework.decorators import action, api_view, permission_classes
from rest_framework.permissions import IsAdminUser, IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.response import Response

from back_latienda.permissions import IsCreator
from companies.models import Company
from history.models import HistorySync
from products.models import Product
from products.serializers import ProductSerializer, SearchResultSerializer, TagFilterSerializer
from utils.tag_filters import ProductTagFilter
from utils.tag_serializers import TaggitSerializer

from .utils import extract_search_filters, find_related_products_v3, find_related_products_v6

# Create your views here.

logging.basicConfig(
    filename='logs/product-load.log',
    filemode='w',
    format='%(levelname)s:%(message)s',
    level=logging.INFO,
)


class ProductViewSet(viewsets.ModelViewSet):
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsCreator]
    # NOTE: when filterset_class is set, DjangoFilterBackend ignores filterset_fields
    filterset_class = ProductTagFilter
    filterset_fields = ['name', 'tags', 'category', 'attributes', 'company', 'created']

    def perform_create(self, serializer):
        serializer.save(creator=self.request.user)

    @action(detail=True, methods=['GET'])
    def related(self, request, pk=None):
        # TODO: find the most similar products
        return Response(data=[])


@api_view(['GET'])
@permission_classes([IsAuthenticated])
def my_products(request):
    qs = Product.objects.filter(creator=request.user)
    product_serializer = ProductSerializer(qs, many=True)
    return Response(data=product_serializer.data)


@api_view(['POST'])
@permission_classes([IsAuthenticated])
def load_coop_products(request):
    """Read the uploaded CSV file and parse it to create products for the related Company."""
    try:
        csv_file = request.FILES['csv_file']
        if not csv_file.name.endswith('.csv'):
            logging.error(f"File {csv_file.name} is not a CSV file")
            return Response({"errors": {"details": "File is not CSV type"}}, status=status.HTTP_400_BAD_REQUEST)
        logging.info(f"Reading contents of {csv_file.name}")
        decoded_file = csv_file.read().decode('utf-8').splitlines()
        csv_reader = csv.DictReader(decoded_file, delimiter=',')
        counter = 0
        # get company linked to user
        company_qs = Company.objects.filter(creator=request.user)
        if company_qs:
            company = company_qs.first()
        else:
            return Response({"errors": {"details": "Your user has no company to add products to"}}, status=status.HTTP_400_BAD_REQUEST)
        # create HistorySync instance for this import
        history = HistorySync.objects.create(
            company=company,
            sync_date=datetime.datetime.now(),
            quantity=len(decoded_file),
        )
        for row in csv_reader:
            if '' in (row['nombre-producto'], row['descripcion'], row['precio'], row['categoria']):
                logging.error(f"Required data missing: {row}")
                continue
            try:
                # download image referenced in the csv
                if row['imagen'].strip() != '':
                    image_url = row['imagen'].strip()
                    response = requests.get(image_url, stream=True)
                    if response.status_code == 200:
                        # keep the original file extension of the downloaded image
                        path = f"{settings.BASE_DIR}/media/{row['nombre-producto'].strip()}.{image_url.split('.')[-1]}"
                        logging.info(f"Saving product image to: {path}")
                        new_image = open(path, 'wb')
                        for chunk in response:
                            new_image.write(chunk)
                        new_image.close()
                    else:
                        logging.warning(f"Image URL did not work: {image_url}")
                        new_image = None
                else:
                    new_image = None
                # TODO: if tags is empty, auto-generate tags
                # assemble instance data
                product_data = {
                    'id': None if row['id'].strip() == '' else row['id'].strip(),
                    'company': company,
                    'name': row['nombre-producto'].strip(),
                    'description': row['descripcion'].strip(),
                    'image': new_image,
                    'url': row['url'].strip(),
                    'price': row['precio'].strip(),
                    'shipping_cost': row['gastos-envio'].strip(),
                    'shipping_terms': row['cond-envio'].strip(),
                    'discount': row['descuento'].strip(),
                    'stock': row['stock'].strip(),
                    'tags': row['tags'].strip(),
                    'category': row['categoria'].strip(),
                    'identifiers': row['identificadores'].strip(),
                    'history': history,
                    'creator': request.user,
                }
                Product.objects.create(**product_data)
                logging.info(f"Created Product: {product_data}")
                counter += 1
            except Exception as e:
                logging.error(f"Could not parse {row}: {e}")
        return Response({"created": counter})
    except Exception as e:
        return Response({"errors": {"details": str(type(e))}}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)


@api_view(['GET'])  # include allowed methods
def product_search(request):
    """
    Take a query string and return the relevant products.

    Params:
    - query_string: used for search [MANDATORY]
    - limit: max number of returned instances [OPTIONAL]
    - offset: where to start counting results [OPTIONAL]
    - shipping_cost: true/false [OPTIONAL]
    - discount: true/false [OPTIONAL]
    """
    query_string = request.GET.get('query_string', None)
    limit = request.GET.get('limit', None)
    offset = request.GET.get('offset', None)
    shipping_cost = request.GET.get('shipping_cost', None)
    if shipping_cost is not None:
        if shipping_cost == 'true':
            shipping_cost = True
        elif shipping_cost == 'false':
            shipping_cost = False
        else:
            shipping_cost = None
    discount = request.GET.get('discount', None)
    if discount is not None:
        if discount == 'true':
            discount = True
        elif discount == 'false':
            discount = False
        else:
            discount = None
    if query_string is None:
        return Response({"errors": {"details": "No query string to parse"}}, status=status.HTTP_400_BAD_REQUEST)
    elif query_string == '':
        # return everything
        serializer = ProductSerializer(Product.objects.all(), many=True)
        products = serializer.data
        # filters = extract_search_filters(products)
        return Response(data={"filters": [], "count": len(products), "products": products})
    try:
        # we collect our results here
        result_set = set()
        # split query string into single words
        chunks = query_string.split(' ')
        for chunk in chunks:
            product_set = find_related_products_v6(chunk, shipping_cost, discount)
            # add to result set
            result_set.update(product_set)
        # TODO: add search for entire phrase ???
        # extract filters from result_set
        filters = extract_search_filters(result_set)
        # order results by RANK
        result_list = list(result_set)
        ranked_products = sorted(result_list, key=lambda product: product.rank, reverse=True)
        serializer = SearchResultSerializer(ranked_products, many=True)
        product_results = [dict(i) for i in serializer.data]
        total_results = len(product_results)
        # RESULTS PAGINATION
        if limit is not None and offset is not None:
            limit = int(limit)
            offset = int(offset)
            product_results = product_results[offset:(limit + offset)]
        elif limit is not None:
            limit = int(limit)
            product_results = product_results[:limit]
        return Response(data={"filters": filters, "count": total_results, "products": product_results})
    except Exception as e:
        return Response({"errors": {"details": str(e)}}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)