3 Answers
From a contributor with 1,854 experience points and 8+ upvotes:
To elaborate on my "code smell" comment, here is a rough draft of what I have done before to solve this kind of problem.
def tokenize(abstract):
    # return <set of words in abstract>
    set_ = set(abstract.split())
    return set_

candidates = [(id_, abstract, tokenize(abstract)) for id_, abstract in Abstracts.items()]
all_criterias = "netherlands vaccine trial".split()
def searcher(candidates, criteria, match_on_found=True):
    search_results = []
    for cand in candidates:
        # cand[2] holds the abstract's set of tokens
        if criteria in cand[2]:
            if match_on_found:
                search_results.append(cand)
        elif not match_on_found:
            # that's an AND NOT, if you wanted that
            search_results.append(cand)
    return search_results
for criteria in all_criterias:
    # pass the candidate list in each time; it shrinks with every criterion
    candidates = searcher(candidates, criteria)

# what's left is what you want
answer = [(cand[0], cand[1]) for cand in candidates]
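The match_on_found flag is only hinted at in a comment above; as a minimal illustration (the sample candidates below are an assumption, not part of the original draft), an AND NOT filter with it could look like this:

# illustrative candidates: (id, abstract, token set) tuples, assumed for the example
sample = [
    (1, "netherlands vaccine trial", {"netherlands", "vaccine", "trial"}),
    (2, "vaccine trial in belgium", {"vaccine", "trial", "in", "belgium"}),
]

# keep only candidates that do NOT contain the word (AND NOT semantics)
without_netherlands = searcher(sample, "netherlands", match_on_found=False)
print([cand[0] for cand in without_netherlands])  # -> [2]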
From a contributor with 1,876 experience points and 7+ upvotes:
I eventually found the solution myself. Replacing
finals.extend(documents.intersection(id_set_for_one_word))
return finals
with
documents = (documents.intersection(id_set_for_one_word))
return documents
seems to work here.
Thanks for everyone's efforts, though.
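For readers who do not have the original function, here is a minimal sketch of how an AND query built around that intersection fix might look; the inverted_index is assumed to map each word to the set of ids of documents containing it (as in the answer below), and the function name is illustrative:

def and_query_sketch(query, inverted_index):
    # return the ids of documents that contain every word of the query
    documents = None
    for word in query.split():
        id_set_for_one_word = inverted_index.get(word, set())
        if documents is None:
            documents = id_set_for_one_word
        else:
            # reassigning the intersection (instead of extending a list)
            # is the fix described above
            documents = documents.intersection(id_set_for_one_word)
    return documents if documents is not None else set()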
From a contributor with 1,877 experience points and 6+ upvotes:
Problem: return the list of documents whose abstracts match the query words.
The term with the minimum number of matching documents is kept and determines the result.
If a term does not occur in inverted_index at all, there is no match.
Predefined data, for simplicity:
Abstracts = {1: 'Lorem ipsum dolor sit amet,',
             2: 'consetetur sadipscing elitr,',
             3: 'sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat,',
             4: 'sed diam voluptua.',
             5: 'At vero eos et accusam et justo duo dolores et ea rebum.',
             6: 'Stet clita kasd gubergren,',
             7: 'no sea takimata sanctus est Lorem ipsum dolor sit amet.',
             }
inverted_index = {'Stet': {6}, 'ipsum': {1, 7}, 'erat,': {3}, 'ut': {3}, 'dolores': {5}, 'gubergren,': {6}, 'kasd': {6}, 'ea': {5}, 'consetetur': {2}, 'sit': {1, 7}, 'nonumy': {3}, 'voluptua.': {4}, 'est': {7}, 'elitr,': {2}, 'At': {5}, 'rebum.': {5}, 'magna': {3}, 'sadipscing': {2}, 'diam': {3, 4}, 'dolore': {3}, 'sanctus': {7}, 'labore': {3}, 'sed': {3, 4}, 'takimata': {7}, 'Lorem': {1, 7}, 'invidunt': {3}, 'aliquyam': {3}, 'accusam': {5}, 'duo': {5}, 'amet.': {7}, 'et': {3, 5}, 'sea': {7}, 'dolor': {1, 7}, 'vero': {5}, 'no': {7}, 'eos': {5}, 'tempor': {3}, 'amet,': {1}, 'clita': {6}, 'justo': {5}, 'eirmod': {3}}
def and_query(tokens):
    print("tokens:{}".format(tokens))
    # terms = preprocess(tokenize(tokens))
    terms = tokens.split()
    term_min = None
    for term in terms:
        if term in inverted_index:
            # Track the term with the smallest posting set
            if not term_min or term_min[0] > len(inverted_index[term]):
                term_min = (len(inverted_index[term]), term)
        else:
            # Break early if a term is not in inverted_index
            return set()
    finals = inverted_index[term_min[1]]
    print("term_min:{} inverted_index:{}".format(term_min, finals))
    return finals
def finals_print(finals):
    if finals:
        for final in finals:
            print("Document [{}]:{}".format(final, Abstracts[final]))
    else:
        print("No matching Document found")
if __name__ == "__main__":
    for tokens in ['sed diam voluptua.', 'Lorem ipsum dolor', 'Lorem ipsum dolor test']:
        finals_print(and_query(tokens))
        print()
Output:
tokens:sed diam voluptua.
term_min:(1, 'voluptua.') inverted_index:{4}
Document [4]:sed diam voluptua.
tokens:Lorem ipsum dolor
term_min:(2, 'Lorem') inverted_index:{1, 7}
Document [1]:Lorem ipsum dolor sit amet,
Document [7]:no sea takimata sanctus est Lorem ipsum dolor sit amet.
tokens:Lorem ipsum dolor test
No matching Document found
Tested with Python 3.4.2.
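As a side note, the answer does not show how inverted_index was derived from Abstracts. One plausible construction, assuming plain whitespace tokenization (consistent with the punctuation kept in the keys above, e.g. 'amet,' vs 'amet.'), would be:

from collections import defaultdict

def build_inverted_index(abstracts):
    # map each whitespace-separated token to the set of document ids containing it
    index = defaultdict(set)
    for doc_id, text in abstracts.items():
        for token in text.split():
            index[token].add(doc_id)
    return dict(index)

# inverted_index = build_inverted_index(Abstracts)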