@app.route('/romanize', methods=['POST'])
def romanizeHandler():
    """Romanize an English word into the Indic languages present in the request.

    Expects a JSON body containing at least the key 'en' (the English word)
    plus language-code keys (e.g. 'hi', 'ta') whose values are the text to
    reverse-transliterate.  Results are cached in the 'romanizations' table
    of ../translations.db so repeated requests skip the transliteration step.

    Returns: a JSON object mapping 'en' and each requested language code to
    its romanized string.
    """
    # Maps request language codes to the script/model code used by
    # reverse_xlit_api.  Several languages share another language's script.
    langCodeLookup = {
        "hi": "hi",
        "bn": "bn",
        "mr": "mr",
        "ta": "ta",
        "te": "te",
        "kn": "kn",
        "ml": "ml",
        "or": "or",
        "gu": "gu",
        "ur": "ur",
        "as": "as",
        "pa": "pa",
        "mai": "mai",
        "ne": "ne",
        "gom": "gom",
        "tcy": "kn",       # Tulu uses Kannada script
        "bho": "hi",       # Bhojpuri uses Hindi script
        "doi": "hi",       # Dogri uses Hindi script
        "mni-Mtei": "mni",
        "sd": "sd",
        "awa": "hi",       # Awadhi uses Hindi script
    }

    data = request.get_json(force=True)
    englishWord = data['en']

    con = sqlite3.connect("../translations.db")
    try:
        cur = con.cursor()
        # Copy schema from the 'translations' table if the cache table
        # doesn't exist yet (WHERE 0 copies columns but no rows).
        cur.execute(
            "CREATE TABLE IF NOT EXISTS romanizations "
            "AS SELECT * FROM translations WHERE 0"
        )

        # Cache hit: return the stored romanizations instead of recomputing.
        cur.execute('SELECT * FROM romanizations WHERE english = ?', (englishWord,))
        row = cur.fetchone()
        if row is not None:
            # NOTE(review): keys here are the table's column names, which are
            # assumed to match the request's language codes (with 'english'
            # for 'en') — TODO confirm against the translations schema.
            columnNames = [column[0] for column in cur.description]
            return jsonify(dict(zip(columnNames, row)))

        # Cache miss: romanize each requested language via the xlit API.
        rtv = {"en": englishWord}
        for key, text in data.items():
            if key in langCodeLookup:
                response = reverse_xlit_api(langCodeLookup[key], text)
                rtv[key] = response.get_json()['result']

        # Persist the freshly computed romanizations.  Placeholders are
        # generated from the value count instead of being hard-coded, so the
        # statement stays correct if the language set changes.
        # NOTE(review): this assumes dict insertion order matches the table's
        # column order — TODO confirm against the translations schema.
        placeholders = ",".join("?" * len(rtv))
        cur.execute(
            f"INSERT INTO romanizations VALUES ({placeholders})",
            tuple(rtv.values()),
        )
        con.commit()
        return jsonify(rtv)
    finally:
        # Always release the connection, even if the handler raises.
        con.close()