Started working on adding romanizations to DB
This commit is contained in:
		
							
								
								
									
										114
									
								
								xlit_server.py
									
									
									
									
									
								
							
							
						
						
									
										114
									
								
								xlit_server.py
									
									
									
									
									
								
							| @@ -195,61 +195,65 @@ def ulca_api(): | |||||||
|  |  | ||||||
@app.route('/romanize', methods=['POST'])
def romanizeHandler():
    """Romanize the translations in the POSTed JSON body.

    Expects a JSON object keyed by language code (plus ``"en"`` for the
    English source word). For every key present in ``langCodeLookup`` the
    text is passed through ``reverse_xlit_api`` and the romanized result is
    collected. The results are cached in a ``romanizations`` table in
    ``../translations.db`` and returned as a JSON response.

    Returns:
        flask.Response: JSON object mapping each input language key (and
        ``"en"``) to its romanization result.
    """
    # Maps request language keys to the script/language code understood by
    # reverse_xlit_api. Several languages share another language's script.
    langCodeLookup = {
        "hi": "hi",
        "bn": "bn",
        "mr": "mr",
        "ta": "ta",
        "te": "te",
        "kn": "kn",
        "ml": "ml",
        "or": "or",
        "gu": "gu",
        "ur": "ur",
        "as": "as",
        "pa": "pa",
        "mai": "mai",
        "ne": "ne",
        "gom": "gom",
        "tcy": "kn",  # Tulu uses Kannada script
        "bho": "hi",  # Bhojpuri uses Hindi script
        "doi": "hi",  # Dogri uses Hindi script
        "mni-Mtei": "mni",
        "sd": "sd",
        "awa": "hi",  # Awadhi uses Hindi script
    }
    rtv = dict()

    data = request.get_json(force=True)

    englishWord = data['en']
    rtv["en"] = englishWord  # keep English first so it lines up with the table's first column
    print(englishWord)

    con = sqlite3.connect("../translations.db")
    try:
        cur = con.cursor()
        # Copy the schema from the 'translations' table; WHERE 0 selects no
        # rows, so a brand-new table starts empty.
        cur.execute("CREATE TABLE IF NOT EXISTS romanizations AS SELECT * FROM translations WHERE 0")

        # Check whether the database already contains romanizations for this word.
        cur.execute('SELECT * FROM romanizations WHERE english = ?', (englishWord,))
        romanizations = cur.fetchall()
        columnNames = [column[0] for column in cur.description]
        if romanizations:
            # BUG FIX: the original dumped the undefined name
            # 'romanizationsdata' (NameError). Build row dicts by zipping the
            # cursor's column names with each row and dump those instead.
            romanizationsDict = [dict(zip(columnNames, row)) for row in romanizations]
            print(json.dumps(romanizationsDict, indent=4))
            # NOTE(review): cached rows are currently only printed, never
            # returned — the cache short-circuit looks unfinished (work in
            # progress per the commit message).

        # Assuming the romanizations didn't exist before: compute them now.
        for key in data:
            if key in langCodeLookup:
                langCode = langCodeLookup[key]
                text = data[key]
                response = reverse_xlit_api(langCode, text)
                responseJson = response.get_json()
                rtv[key] = responseJson['result']

        rtvJson = jsonify(rtv)

        # Cache the computed romanizations. Derive the placeholder count from
        # the values instead of hard-coding 22 '?'.
        # NOTE(review): this positional INSERT relies on rtv's insertion order
        # matching the table's column order and on the request supplying every
        # language — verify against the 'translations' schema.
        placeholders = ",".join("?" * len(rtv))
        cur.execute("INSERT INTO romanizations VALUES (" + placeholders + ")", tuple(rtv.values()))
        con.commit()
    finally:
        # Always release the connection, even if a query or the
        # transliteration call raises.
        con.close()
    return rtvJson
|   | |||||||
		Reference in New Issue
	
	Block a user