diff --git a/cope2n-api/fwd_api/api/accuracy_view.py b/cope2n-api/fwd_api/api/accuracy_view.py
index fc60961..dfb0554 100755
--- a/cope2n-api/fwd_api/api/accuracy_view.py
+++ b/cope2n-api/fwd_api/api/accuracy_view.py
@@ -284,6 +284,7 @@ class AccuracyViewSet(viewsets.ViewSet):
         return JsonResponse(status=status.HTTP_200_OK, data={"report_id": report_id})
 
 
+    # Redundant, will be removed by 19 March 2024
     @extend_schema(
         parameters=[
             OpenApiParameter(
@@ -417,6 +418,9 @@ class AccuracyViewSet(viewsets.ViewSet):
                     acc[key] = report.combined_accuracy.get(key, 0) if report.combined_accuracy else max([fb, rv])
                 else:
                     acc[key] = None
+            processing_time = report.average_OCR_time.get("avg", None) if report.average_OCR_time else None
+            if processing_time and processing_time == 0:
+                processing_time = None
             data.append({
                 "ID": report.id,
                 "Created Date": report.created_at,
@@ -429,7 +433,7 @@ class AccuracyViewSet(viewsets.ViewSet):
                 "IMEI Acc": acc["imei_number"],
                 "Avg. Accuracy": acc["avg"],
                 "Avg. Client Request Time": report.average_client_time.get("avg", 0) if report.average_client_time else 0,
-                "Avg. OCR Processing Time": report.average_OCR_time.get("avg", 0) if report.average_OCR_time else 0,
+                "Avg. OCR Processing Time": processing_time,
                 "report_id": report.report_id,
                 "Subsidiary": map_subsidiary_short_to_long(report.subsidiary),
             })
@@ -544,7 +548,7 @@ class AccuracyViewSet(viewsets.ViewSet):
             for key in keys:
                 if report_fine_data[i][key]:
                     for x_key in report_fine_data[i][key].keys():
-                        report_fine_data[i][key][x_key] = report_fine_data[i][key][x_key]*100
+                        report_fine_data[i][key][x_key] = report_fine_data[i][key][x_key]*100 if report_fine_data[i][key][x_key] is not None else None
 
         overview_filename = _subsidiary + "_" + duration + ".xlsx"
         data_workbook = dict2xlsx(report_fine_data, _type='report')
diff --git a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
index e450a25..084a200 100755
--- a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
+++ b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
@@ -125,6 +125,8 @@ def make_a_report(report_id, query_set):
         report.average_OCR_time = {"invoice": time_cost["invoice"](), "imei": time_cost["imei"](),
                                    "invoice_count": time_cost["invoice"].count, "imei_count": time_cost["imei"].count}
 
+        report.average_OCR_time["invoice"] = 0 if report.average_OCR_time["invoice"] is None else report.average_OCR_time["invoice"]
+        report.average_OCR_time["imei"] = 0 if report.average_OCR_time["imei"] is None else report.average_OCR_time["imei"]
         report.average_OCR_time["avg"] = (report.average_OCR_time["invoice"]*report.average_OCR_time["invoice_count"] + report.average_OCR_time["imei"]*report.average_OCR_time["imei_count"])/(report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) if (report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) > 0 else None
 
         report.number_imei_transaction = transaction_att.get("imei", 0)
@@ -271,6 +273,8 @@ def create_accuracy_report(report_id, **kwargs):
         report.average_OCR_time = {"invoice": time_cost["invoice"](), "imei": time_cost["imei"](),
                                    "invoice_count": time_cost["invoice"].count, "imei_count": time_cost["imei"].count}
 
+        report.average_OCR_time["invoice"] = 0 if report.average_OCR_time["invoice"] is None else report.average_OCR_time["invoice"]
+        report.average_OCR_time["imei"] = 0 if report.average_OCR_time["imei"] is None else report.average_OCR_time["imei"]
         report.average_OCR_time["avg"] = (report.average_OCR_time["invoice"]*report.average_OCR_time["invoice_count"] + report.average_OCR_time["imei"]*report.average_OCR_time["imei_count"])/(
             report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) if (report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) > 0 else None
         report.number_imei_transaction = transaction_att.get("imei", 0)
diff --git a/cope2n-api/fwd_api/celery_worker/process_result_tasks.py b/cope2n-api/fwd_api/celery_worker/process_result_tasks.py
index 9277961..df68bae 100755
--- a/cope2n-api/fwd_api/celery_worker/process_result_tasks.py
+++ b/cope2n-api/fwd_api/celery_worker/process_result_tasks.py
@@ -146,7 +146,7 @@ def process_invoice_sbt_result(rq_id, result, metadata):
         page_index = int(rq_id.split("_sub_")[1])
         rq_id = rq_id.split("_sub_")[0]
         rq: SubscriptionRequest = SubscriptionRequest.objects.filter(request_id=rq_id).first()
-
+        
         result["metadata"] = metadata
         # status = to_status(result)
 
@@ -171,7 +171,6 @@ def process_invoice_sbt_result(rq_id, result, metadata):
             rq.preprocessing_time = result.get("metadata", {}).get("preprocessing_time", 0) # advancing the last result
             rq.ai_inference_time = time.time() - rq.ai_inference_start_time
             rq.save()
-
         else:
             rq.status = 404 # stop waiting
             rq.predict_result = result
diff --git a/cope2n-api/fwd_api/management/commands/migrate-database-010224.py b/cope2n-api/fwd_api/management/commands/migrate-database-010224.py
index 8788c3c..11befe3 100644
--- a/cope2n-api/fwd_api/management/commands/migrate-database-010224.py
+++ b/cope2n-api/fwd_api/management/commands/migrate-database-010224.py
@@ -101,8 +101,8 @@ class Command(BaseCommand):
                     request.is_reviewed = False
                     request.save()
                 image.predict_result = _predict_result
-                image.feedback_result = _feedback_result
-                image.reviewed_result = _reviewed_result
+                # image.feedback_result = _feedback_result
+                # image.reviewed_result = _reviewed_result
                 image.save()
             except Exception as e:
                 self.stdout.write(self.style.ERROR(f"Request: {request.request_id} failed with {e}"))
diff --git a/cope2n-api/fwd_api/utils/accuracy.py b/cope2n-api/fwd_api/utils/accuracy.py
index ee258c0..346ea8b 100755
--- a/cope2n-api/fwd_api/utils/accuracy.py
+++ b/cope2n-api/fwd_api/utils/accuracy.py
@@ -848,6 +848,12 @@ def create_billing_data(subscription_requests):
     return billing_data
 
 def calculate_a_request(report, request):
+    def review_status_map(input):
+        review_status = {-1: "Not Required",
+                         0: "No",
+                         1: "Yes"}
+        return review_status.get(input, "N/A")
+
     request_att = {"acc": {"feedback": {"imei_number": [],
                                         "purchase_date": [],
                                         "retailername": [],
@@ -905,8 +911,8 @@
                 if len(att["normalized_data"]["reviewed"].get("purchase_date", [])) > 0:
                     image.predict_result["purchase_date"] = [att["normalized_data"]["reviewed"]["purchase_date"][i][0] for i in range(len(att["normalized_data"]["reviewed"]["purchase_date"]))]
                     image.reviewed_result["purchase_date"] = att["normalized_data"]["reviewed"]["purchase_date"][rv_max_indexes["purchase_date"]][1]
-                if request.is_reviewed:
-                    att["is_reviewed"] = 1
+                # if request.is_reviewed:
+                #     att["is_reviewed"] = 1
                 request_att["is_reviewed"].append(att["is_reviewed"])
                 new_report_file = ReportFile(report=report,
                                              subsidiary=_sub,
@@ -920,7 +926,7 @@
                                              reviewed_accuracy=att["acc"]["reviewed"],
                                              acc=att["avg_acc"],
                                              is_bad_image=att["is_bad_image"],
-                                             is_reviewed= "Yes" if request.is_reviewed else "No",
+                                             is_reviewed= review_status_map(att["is_reviewed"]),
                                              time_cost=image.processing_time,
                                              bad_image_reason=image.reason,
                                              counter_measures=image.counter_measures,
@@ -1014,6 +1020,10 @@ def calculate_subcription_file(subcription_request_file):
            avg_acc = avg_reviewed
            att["is_reviewed"] = 1
 
+    # Little trick to overcome issue caused by misleading manually review process
+    if subcription_request_file.reason or subcription_request_file.counter_measures:
+        att["is_reviewed"] = 1
+
     att["avg_acc"] = avg_acc
     if avg_acc < settings.BAD_THRESHOLD:
         att["is_bad_image"] = True
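Taken together, the changes above normalize two behaviours: the tri-state `is_reviewed` flag is rendered through the new `review_status_map` helper instead of a plain Yes/No boolean, and the average OCR time is only computed when at least one invoice or IMEI request was counted, with missing per-type averages coerced to 0 first. The sketch below is a minimal, self-contained illustration of that logic; the function names mirror the patch, but the standalone form and the sample values are assumptions for illustration, not code taken from the repository.

# Illustrative sketch only -- mirrors the logic introduced by this patch;
# the standalone functions and sample values are assumptions, not repository code.

def review_status_map(status):
    # -1: review not required, 0: not reviewed, 1: reviewed
    review_status = {-1: "Not Required", 0: "No", 1: "Yes"}
    return review_status.get(status, "N/A")

def average_ocr_time(invoice_avg, imei_avg, invoice_count, imei_count):
    # Missing per-type averages are treated as 0 so the weighted sum stays
    # numeric; when nothing was processed at all, there is no average.
    invoice_avg = 0 if invoice_avg is None else invoice_avg
    imei_avg = 0 if imei_avg is None else imei_avg
    total = invoice_count + imei_count
    if total == 0:
        return None
    return (invoice_avg * invoice_count + imei_avg * imei_count) / total

if __name__ == "__main__":
    print(review_status_map(-1))               # Not Required
    print(review_status_map(2))                # N/A
    print(average_ocr_time(2.5, None, 4, 0))   # 2.5
    print(average_ocr_time(None, None, 0, 0))  # None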