# API人脸 **Repository Path**: AliceZING/api-face ## Basic Information - **Project Name**: API人脸 - **Description**: No description available - **Primary Language**: Unknown - **License**: Not specified - **Default Branch**: master - **Homepage**: None - **GVP Project**: No ## Statistics - **Stars**: 0 - **Forks**: 0 - **Created**: 2020-10-24 - **Last Updated**: 2021-01-28 ## Categories & Tags **Categories**: Uncategorized **Tags**: None ## README # API运用 ## Azure ``` import requests # 1、create 列表 # faceListId faceListId ="zy03" create_facelists_url = "https://api-zy.cognitiveservices.azure.com/face/v1.0/facelists/{}" subscription_key = "e86d9bb9e8d942ca94b739af0306ef94" assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } data = { "name":"相册集", "userData":"11人,10男,1女", "recognitionModel":"recognition_03", } r_create = requests.put(create_facelists_url.format(faceListId),headers=headers,json=data) import requests # 1、create 列表 # faceListId faceListId ="zy03" create_facelists_url = "https://api-zy.cognitiveservices.azure.com/face/v1.0/facelists/{}" subscription_key = "e86d9bb9e8d942ca94b739af0306ef94" assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } data = { "name":"相册集", "userData":"11人,10男,1女", "recognitionModel":"recognition_03", } r_create = requests.put(create_facelists_url.format(faceListId),headers=headers,json=data) r_create.content #b'' # 2、Add face add_face_url = "https://api-zy.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces" subscription_key = "e86d9bb9e8d942ca94b739af0306ef94" assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } img_url = "https://wx3.sinaimg.cn/mw690/ed4d61ably1ge90p0nsoqj20u011i439.jpg" params_add_face={ "userData":"Tay" } r_add_face = 
requests.post(add_face_url.format(faceListId),headers=headers,params=params_add_face,json={"url":img_url}) r_add_face.status_code #200 # 封装成函数方便添加图片 def AddFace(img_url, userData): add_face_url ="https://api-zy.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces" assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } img_url = img_url params_add_face={ "userData":userData } r_add_face = requests.post(add_face_url.format(faceListId),headers=headers,params=params_add_face,json={"url":img_url}) return r_add_face.status_code#返回出状态码 AddFace("https://wx2.sinaimg.cn/mw690/006UkIMQgy1gilovpedhzj30u011ijv3.jpg","Lauv") AddFace("https://wx2.sinaimg.cn/mw690/ecff9b1aly1gh1xv6j4lcj20u011h44a.jpg","Blackbear") AddFace("https://ww4.sinaimg.cn/bmiddle/99a80fedly1gk0a5bswkzj20u011htcc.jpg","PostMalone") AddFace("https://ww4.sinaimg.cn/bmiddle/e9281103ly1gjs2pqttj2j20u011idmr.jpg","ConanGray") AddFace("https://wx3.sinaimg.cn/mw690/006ST4imgy1gjzivxp70oj30to0jsdit.jpg","MGK") AddFace("https://ww3.sinaimg.cn/bmiddle/005KrlAhly1ggue5t1klyj30u018pn6u.jpg","Logic") AddFace("https://ww2.sinaimg.cn/bmiddle/bf26cc17ly1gjj048l7dej20v912ok93.jpg","lany") AddFace("https://wx4.sinaimg.cn/mw690/005QABcggy1ghqiu30pkyj30ld0fdtb0.jpg","LewisCapaldi") AddFace("https://wx3.sinaimg.cn/mw690/005Rug7vly1gf6tp1c2s9j30u011in32.jpg","Christopher") AddFace("https://wx2.sinaimg.cn/mw690/007Su2Scly1gjuky00vvyj30u011hqix.jpg","JB") #200 # Get facelist get_facelist_url = "https://api-zy.cognitiveservices.azure.com/face/v1.0/facelists/zy03" r_get_facelist = requests.get(get_facelist_url,headers=headers)#学生填写 r_get_facelist.json() #{'persistedFaces': [{'persistedFaceId': '4f7d29aa-3328-4db4-aa99-078ca099f06b', 'userData': 'Tay'}, #{'persistedFaceId': 'c0369103-92a9-45da-9360-1032217e2a58', 'userData': 'Blackbear'}, #{'persistedFaceId': 'cdab2d9e-a699-4ea8-bfea-f055b9e59efa', 'userData': 'PostMalone'}, 
#{'persistedFaceId': '5183429e-0f90-4f75-8b8a-e75e497f68ef', 'userData': 'ConanGray'}, #{'persistedFaceId': '217752cd-2fb2-4f91-a454-8286f9cbb872', 'userData': 'Logic'}, #{'persistedFaceId': 'e65041fc-42c4-4f9c-bfa5-964088a7e432', 'userData': 'lany'}, #{'persistedFaceId': '00608469-807f-46a5-a203-11610c8944f8', 'userData': 'LewisCapaldi'}, #{'persistedFaceId': '27dcf411-1e11-410c-b3a7-755d38ddc758', 'userData': 'Christopher'}, #{'persistedFaceId': '3ae39d18-995e-4d3a-ad92-5bddf521d183', 'userData': 'JB'}, #{'persistedFaceId': '7b982ff5-cf29-432b-863d-8aa70758eeed', 'userData': 'Blackbear'}, #{'persistedFaceId': 'ef692497-e61f-4302-895d-43d850aa12ca', 'userData': 'PostMalone'}, #{'persistedFaceId': 'fc594817-b796-458e-86d8-a7aa0171d75c', 'userData': 'ConanGray'}, #{'persistedFaceId': 'b0d27b78-2b7e-449a-af9c-a6b906c647fb', 'userData': 'Logic'}, #{'persistedFaceId': 'd54ae7ec-858c-4c61-88f3-2086c603e13b', 'userData': 'lany'}, #{'persistedFaceId': '4b9d6a82-8dee-46f0-8ddb-aa58c86019d2', 'userData': 'LewisCapaldi'}, #{'persistedFaceId': '5d0fd1d0-1130-47e8-a91b-0200e34545c1', 'userData': 'Christopher'}, #{'persistedFaceId': 'ce6e1f1b-1011-4cdc-bb62-c261ea34ea28', 'userData': 'JB'}], 'faceListId': 'zy03', 'name': '相册集'} faceId = r_get_facelist.json()['persistedFaces'] #faceId #[{'persistedFaceId': '4f7d29aa-3328-4db4-aa99-078ca099f06b', 'userData': 'Tay'}, #{'persistedFaceId': 'c0369103-92a9-45da-9360-1032217e2a58', 'userData': 'Blackbear'}, #{'persistedFaceId': 'cdab2d9e-a699-4ea8-bfea-f055b9e59efa', 'userData': 'PostMalone'}, #{'persistedFaceId': '5183429e-0f90-4f75-8b8a-e75e497f68ef', 'userData': 'ConanGray'}, #{'persistedFaceId': '217752cd-2fb2-4f91-a454-8286f9cbb872', 'userData': 'Logic'}, #{'persistedFaceId': 'e65041fc-42c4-4f9c-bfa5-964088a7e432', 'userData': 'lany'}, #{'persistedFaceId': '00608469-807f-46a5-a203-11610c8944f8', 'userData': 'LewisCapaldi'}, #{'persistedFaceId': '27dcf411-1e11-410c-b3a7-755d38ddc758', 'userData': 'Christopher'}, 
#{'persistedFaceId': '3ae39d18-995e-4d3a-ad92-5bddf521d183', 'userData': 'JB'}, #{'persistedFaceId': '7b982ff5-cf29-432b-863d-8aa70758eeed', 'userData': 'Blackbear'}, #{'persistedFaceId': 'ef692497-e61f-4302-895d-43d850aa12ca', 'userData': 'PostMalone'}, #{'persistedFaceId': 'fc594817-b796-458e-86d8-a7aa0171d75c', 'userData': 'ConanGray'}, #{'persistedFaceId': 'b0d27b78-2b7e-449a-af9c-a6b906c647fb', 'userData': 'Logic'}, #{'persistedFaceId': 'd54ae7ec-858c-4c61-88f3-2086c603e13b', 'userData': 'lany'}, #{'persistedFaceId': '4b9d6a82-8dee-46f0-8ddb-aa58c86019d2', 'userData': 'LewisCapaldi'}, #{'persistedFaceId': '5d0fd1d0-1130-47e8-a91b-0200e34545c1', 'userData': 'Christopher'}, #{'persistedFaceId': 'ce6e1f1b-1011-4cdc-bb62-c261ea34ea28', 'userData': 'JB'}] ​ # Detect face 删除列表内人脸id faceListId ="zy03" delete_face_url ="https://api-zy.cognitiveservices.azure.com//face/v1.0/facelists/{}/persistedfaces/{}" ​ assert subscription_key ​ persistedFaceId ='ce6e1f1b-1011-4cdc-bb62-c261ea34ea28' # 直接取上面获得的ID{'persistedFaceId': 'f18450d3-60d2-45f3-a69e-783574dc3ce8'} ​ headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } ​ # 注意requests请求为delete r_delete_face = requests.delete(delete_face_url.format(faceListId,persistedFaceId),headers=headers)#学生填写 r_delete_face # 检查列表内人脸列表 get_face_url ="https://api-zy.cognitiveservices.azure.com/face/v1.0/facelists/wx02" r_get_facelist = requests.get(get_facelist_url,headers=headers) r_get_facelist.json() {'persistedFaces': [{'persistedFaceId': '4f7d29aa-3328-4db4-aa99-078ca099f06b', 'userData': 'Tay'}, {'persistedFaceId': 'c0369103-92a9-45da-9360-1032217e2a58', 'userData': 'Blackbear'}, {'persistedFaceId': 'cdab2d9e-a699-4ea8-bfea-f055b9e59efa', 'userData': 'PostMalone'}, {'persistedFaceId': '5183429e-0f90-4f75-8b8a-e75e497f68ef', 'userData': 'ConanGray'}, {'persistedFaceId': '217752cd-2fb2-4f91-a454-8286f9cbb872', 'userData': 'Logic'}, {'persistedFaceId': 
'e65041fc-42c4-4f9c-bfa5-964088a7e432', 'userData': 'lany'}, {'persistedFaceId': '00608469-807f-46a5-a203-11610c8944f8', 'userData': 'LewisCapaldi'}, {'persistedFaceId': '27dcf411-1e11-410c-b3a7-755d38ddc758', 'userData': 'Christopher'}, {'persistedFaceId': '3ae39d18-995e-4d3a-ad92-5bddf521d183', 'userData': 'JB'}, {'persistedFaceId': '7b982ff5-cf29-432b-863d-8aa70758eeed', 'userData': 'Blackbear'}, {'persistedFaceId': 'ef692497-e61f-4302-895d-43d850aa12ca', 'userData': 'PostMalone'}, {'persistedFaceId': 'fc594817-b796-458e-86d8-a7aa0171d75c', 'userData': 'ConanGray'}, {'persistedFaceId': 'b0d27b78-2b7e-449a-af9c-a6b906c647fb', 'userData': 'Logic'}, {'persistedFaceId': 'd54ae7ec-858c-4c61-88f3-2086c603e13b', 'userData': 'lany'}, {'persistedFaceId': '4b9d6a82-8dee-46f0-8ddb-aa58c86019d2', 'userData': 'LewisCapaldi'}, {'persistedFaceId': '5d0fd1d0-1130-47e8-a91b-0200e34545c1', 'userData': 'Christopher'}], 'faceListId': 'zy03', 'name': '相册集'} # 3、检测人脸的id # replace with the string from your endpoint URL face_api_url = 'https://api-zy.cognitiveservices.azure.com/face/v1.0/detect' assert subscription_key ​ # 请求正文 image_url = 'https://wx2.sinaimg.cn/mw690/ecff9b1aly1gh1xv6j4lcj20u011h44a.jpg' ​ headers = {'Ocp-Apim-Subscription-Key': subscription_key } ​ # 请求参数 params = { 'returnFaceId': 'true', 'returnFaceLandmarks': 'false', # 选择model 'recognitionModel':'recognition_03',#此参数需与facelist参数一致 'detectionModel':'detection_01', # 可选参数,请仔细阅读API文档 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise', } ​ response = requests.post(face_api_url, params=params, headers=headers, json={"url": image_url}) # json.dumps 将json--->字符串 response.json() [{'faceId': 'e1fdeb03-db0f-45c8-be55-422ed3c6f517', 'faceRectangle': {'top': 131, 'left': 251, 'width': 248, 'height': 248}, 'faceAttributes': {'smile': 0.0, 'headPose': {'pitch': -5.1, 'roll': 1.9, 'yaw': 3.9}, 'gender': 'male', 'age': 27.0, 'facialHair': 
{'moustache': 0.1, 'beard': 0.1, 'sideburns': 0.1}, 'glasses': 'NoGlasses', 'emotion': {'anger': 0.0, 'contempt': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happiness': 0.0, 'neutral': 0.999, 'sadness': 0.001, 'surprise': 0.0}, 'blur': {'blurLevel': 'low', 'value': 0.03}, 'exposure': {'exposureLevel': 'goodExposure', 'value': 0.62}, 'noise': {'noiseLevel': 'low', 'value': 0.19}, 'makeup': {'eyeMakeup': False, 'lipMakeup': False}, 'accessories': [{'type': 'headwear', 'confidence': 1.0}], 'occlusion': {'foreheadOccluded': True, 'eyeOccluded': False, 'mouthOccluded': False}, 'hair': {'bald': 0.0, 'invisible': True, 'hairColor': []}}}] findsimilars_url = "https://api-zy.cognitiveservices.azure.com/face/v1.0/findsimilars" ​ # 请求正文 faceId需要先检测一张照片获取 data_findsimilars = { "faceId":"e1fdeb03-db0f-45c8-be55-422ed3c6f517",#取上方的faceID "faceListId": "zy03", "maxNumOfCandidatesReturned": 10, "mode": "matchFace"#matchPerson #一种为验证模式,一种为相似值模式 } ​ r_findsimilars = requests.post(findsimilars_url,headers=headers,json=data_findsimilars) r_findsimilars.json() [{'persistedFaceId': 'c0369103-92a9-45da-9360-1032217e2a58', 'confidence': 1.0}, {'persistedFaceId': '7b982ff5-cf29-432b-863d-8aa70758eeed', 'confidence': 1.0}, {'persistedFaceId': '217752cd-2fb2-4f91-a454-8286f9cbb872', 'confidence': 0.19634}, {'persistedFaceId': 'b0d27b78-2b7e-449a-af9c-a6b906c647fb', 'confidence': 0.19634}, {'persistedFaceId': '3ae39d18-995e-4d3a-ad92-5bddf521d183', 'confidence': 0.19118}, {'persistedFaceId': 'e65041fc-42c4-4f9c-bfa5-964088a7e432', 'confidence': 0.14233}, {'persistedFaceId': 'd54ae7ec-858c-4c61-88f3-2086c603e13b', 'confidence': 0.14233}, {'persistedFaceId': '5183429e-0f90-4f75-8b8a-e75e497f68ef', 'confidence': 0.0996}, {'persistedFaceId': 'fc594817-b796-458e-86d8-a7aa0171d75c', 'confidence': 0.0996}, {'persistedFaceId': 'cdab2d9e-a699-4ea8-bfea-f055b9e59efa', 'confidence': 0.09558}] #facelist里面的数据 import pandas as pd adf = pd.json_normalize(r_get_facelist.json()["persistedFaces"])# 升级pandas才能运行 adf 
persistedFaceId userData 0 4f7d29aa-3328-4db4-aa99-078ca099f06b Tay 1 c0369103-92a9-45da-9360-1032217e2a58 Blackbear 2 cdab2d9e-a699-4ea8-bfea-f055b9e59efa PostMalone 3 5183429e-0f90-4f75-8b8a-e75e497f68ef ConanGray 4 217752cd-2fb2-4f91-a454-8286f9cbb872 Logic 5 e65041fc-42c4-4f9c-bfa5-964088a7e432 lany 6 00608469-807f-46a5-a203-11610c8944f8 LewisCapaldi 7 27dcf411-1e11-410c-b3a7-755d38ddc758 Christopher 8 3ae39d18-995e-4d3a-ad92-5bddf521d183 JB 9 7b982ff5-cf29-432b-863d-8aa70758eeed Blackbear 10 ef692497-e61f-4302-895d-43d850aa12ca PostMalone 11 fc594817-b796-458e-86d8-a7aa0171d75c ConanGray 12 b0d27b78-2b7e-449a-af9c-a6b906c647fb Logic 13 d54ae7ec-858c-4c61-88f3-2086c603e13b lany 14 4b9d6a82-8dee-46f0-8ddb-aa58c86019d2 LewisCapaldi 15 5d0fd1d0-1130-47e8-a91b-0200e34545c1 Christopher # 返回相似度的数据 bdf = pd.json_normalize(r_findsimilars.json())# 升级pandas才能运行 bdf persistedFaceId confidence 0 c0369103-92a9-45da-9360-1032217e2a58 1.00000 1 7b982ff5-cf29-432b-863d-8aa70758eeed 1.00000 2 217752cd-2fb2-4f91-a454-8286f9cbb872 0.19634 3 b0d27b78-2b7e-449a-af9c-a6b906c647fb 0.19634 4 3ae39d18-995e-4d3a-ad92-5bddf521d183 0.19118 5 e65041fc-42c4-4f9c-bfa5-964088a7e432 0.14233 6 d54ae7ec-858c-4c61-88f3-2086c603e13b 0.14233 7 5183429e-0f90-4f75-8b8a-e75e497f68ef 0.09960 8 fc594817-b796-458e-86d8-a7aa0171d75c 0.09960 9 cdab2d9e-a699-4ea8-bfea-f055b9e59efa 0.09558 #合并在一起,得出班级能谁最像你 pd.merge(adf, bdf,how='inner', on='persistedFaceId').sort_values(by="confidence",ascending = False) persistedFaceId userData confidence 0 c0369103-92a9-45da-9360-1032217e2a58 Blackbear 1.00000 6 7b982ff5-cf29-432b-863d-8aa70758eeed Blackbear 1.00000 3 217752cd-2fb2-4f91-a454-8286f9cbb872 Logic 0.19634 8 b0d27b78-2b7e-449a-af9c-a6b906c647fb Logic 0.19634 5 3ae39d18-995e-4d3a-ad92-5bddf521d183 JB 0.19118 4 e65041fc-42c4-4f9c-bfa5-964088a7e432 lany 0.14233 9 d54ae7ec-858c-4c61-88f3-2086c603e13b lany 0.14233 2 5183429e-0f90-4f75-8b8a-e75e497f68ef ConanGray 0.09960 7 fc594817-b796-458e-86d8-a7aa0171d75c 
ConanGray 0.09960 1 cdab2d9e-a699-4ea8-bfea-f055b9e59efa PostMalone 0.09558 ``` ## Face++ ``` # 1.先导入需要的模块 import requests ​ # 2.输入我们api_secret、api_key api_secret = "_DAnfXANFVF6sfJjxeo8KSBya2-qfWxk" api_key = 'dOpMliQxn4WUJobcHsZw9715N7lsS3wN' # 3.目标url # 这里也可以使用本地图片 例如:filepath ="image/tupian.jpg" BASE_URL = 'https://api-cn.faceplusplus.com/facepp/v3/detect' img_url = 'https://gitee.com/AliceZING/apiworkspace/raw/master/lzy.jpg' ​ #4.沿用API文档的示范代码,准备我们的headers和图片(数据) headers = { 'Content-Type': 'application/json', } # 5.准备后面的数据 payload = { "image_url":img_url, 'api_key': api_key, 'api_secret': api_secret, # 是否检测并返回根据人脸特征判断出的年龄、性别、情绪等属性。 'return_attributes':'gender,age,smiling,emotion', } # 6.requests发送我们请求 r = requests.post(BASE_URL, params=payload, headers=headers) r.status_code 200 r.content b'{"request_id":"1603528161,bf04685a-191e-4ce4-8633-ae5842c503d8","time_used":1330,"faces":[{"face_token":"9a6ebaa4d2e0253c195f8db569d73fb9","face_rectangle":{"top":439,"left":444,"width":439,"height":439},"attributes":{"gender":{"value":"Female"},"age":{"value":20},"smile":{"value":99.837,"threshold":50.000},"emotion":{"anger":0.003,"disgust":0.001,"fear":0.001,"happiness":77.495,"neutral":0.010,"sadness":0.001,"surprise":22.488}}}],"image_id":"PlmSgvTxhOxSDaCZGBbs0w==","face_num":1}\n' results = r.json() results {'request_id': '1603528161,bf04685a-191e-4ce4-8633-ae5842c503d8', 'time_used': 1330, 'faces': [{'face_token': '9a6ebaa4d2e0253c195f8db569d73fb9', 'face_rectangle': {'top': 439, 'left': 444, 'width': 439, 'height': 439}, 'attributes': {'gender': {'value': 'Female'}, 'age': {'value': 20}, 'smile': {'value': 99.837, 'threshold': 50.0}, 'emotion': {'anger': 0.003, 'disgust': 0.001, 'fear': 0.001, 'happiness': 77.495, 'neutral': 0.01, 'sadness': 0.001, 'surprise': 22.488}}}], 'image_id': 'PlmSgvTxhOxSDaCZGBbs0w==', 'face_num': 1} api_secret = "_DAnfXANFVF6sfJjxeo8KSBya2-qfWxk" api_key = 'dOpMliQxn4WUJobcHsZw9715N7lsS3wN' ​ # 1.FaceSet Create import requests,json ​ 
display_name = "人脸集合2020" #自定义人脸集合的名字 outer_id = "2020" #自定义标识 user_data = "renlian" #自定义用户信息 ​ CreateFace_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/create" #调用URL payload = { # 请求参数 'api_key': api_key, 'api_secret': api_secret, 'display_name':display_name, 'outer_id':outer_id, 'user_data':user_data } r = requests.post(CreateFace_Url, params=payload) r.json() {'faceset_token': '1c5f583e56c0475969026fc0516bc857', 'time_used': 143, 'face_count': 0, 'face_added': 0, 'request_id': '1603529415,21cdc2ab-b817-46d0-b218-ceb4ca86ae3b', 'outer_id': '2020', 'failure_detail': []} # 2.FaceSet GetDetail(获取人脸集合信息) GetDetail_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/getdetail" payload = { 'api_key': api_key, 'api_secret': api_secret, 'outer_id':outer_id, } r = requests.post(GetDetail_Url,params=payload) r.json() {'faceset_token': '1c5f583e56c0475969026fc0516bc857', 'tags': '', 'time_used': 85, 'user_data': 'renlian', 'display_name': '人脸集合2020', 'face_tokens': [], 'face_count': 0, 'request_id': '1603529729,621711f9-2b4e-4424-a342-6c1fe2300e42', 'outer_id': '2020'} # 3.FaceSet AddFace(增加人脸信息) AddFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/addface" ​ payload = { 'api_key': api_key, 'api_secret': api_secret, 'faceset_token':'1c5f583e56c0475969026fc0516bc857', 'face_tokens':'9a6ebaa4d2e0253c195f8db569d73fb9', # qianmiande } r = requests.post(AddFace_url,params=payload) r.json() {'faceset_token': '1c5f583e56c0475969026fc0516bc857', 'time_used': 630, 'face_count': 1, 'face_added': 1, 'request_id': '1603529731,c6cddd63-15e5-4166-b145-6050ffe5eba2', 'outer_id': '2020', 'failure_detail': []} # 4.FaceSet RemoveFace(移除人脸信息) RemoveFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/removeface" ​ payload = { 'api_key': api_key, 'api_secret': api_secret, 'faceset_token':'1c5f583e56c0475969026fc0516bc857', 'face_tokens':'9a6ebaa4d2e0253c195f8db569d73fb9', } r = requests.post(RemoveFace_url,params=payload) r.json() {'faceset_token': 
'1c5f583e56c0475969026fc0516bc857', 'face_removed': 1, 'time_used': 172, 'face_count': 0, 'request_id': '1603529796,890511a5-e5d4-4b40-aa6d-400c9f0c698b', 'outer_id': '2020', 'failure_detail': []} # 5.FaceSet Update(更新人脸集合信息) Update_url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/update" payload = { 'api_key': api_key, 'api_secret': api_secret, 'faceset_token':'1c5f583e56c0475969026fc0516bc857', 'user_data':"renlian", } r = requests.post(Update_url,params=payload) r.json() {'faceset_token': '1c5f583e56c0475969026fc0516bc857', 'request_id': '1603530460,2d58e296-985a-4601-a3a7-7eb0b8c0505b', 'time_used': 80, 'outer_id': '2020'} # 6.Compare Face(对比人脸相似度) tay = "https://wx3.sinaimg.cn/mw690/ed4d61ably1ge90p0nsoqj20u011i439.jpg" #liudehua02 = "https://tse3-mm.cn.bing.net/th/id/OIP.Xz3HbYZeNrdUnGJ7vXNzsQHaKO?pid=Api&rs=1" lauv = "https://wx2.sinaimg.cn/mw690/006UkIMQgy1gilovpedhzj30u011ijv3.jpg" ​ Compare_url = "https://api-cn.faceplusplus.com/facepp/v3/compare" payload ={ 'api_key': api_key, 'api_secret': api_secret, 'image_url1':tay, 'image_url2':lauv, } r = requests.post(Compare_url,params=payload) r.json() {'faces1': [{'face_rectangle': {'width': 304, 'top': 167, 'left': 171, 'height': 304}, 'face_token': 'd65ff8d654813ae7dfdb23b88faaccef'}], 'faces2': [{'face_rectangle': {'width': 142, 'top': 266, 'left': 256, 'height': 142}, 'face_token': '3aa9f2511c29a8cff068c73b8299cb60'}, {'face_rectangle': {'width': 83, 'top': 595, 'left': 298, 'height': 83}, 'face_token': '7f623eb0072627164cfa61b510a1ad9c'}], 'time_used': 702, 'thresholds': {'1e-3': 62.327, '1e-5': 73.975, '1e-4': 69.101}, 'confidence': 42.191, 'image_id2': 'yIh/tAI0SYhxuFBfE3+EQA==', 'image_id1': 'ACWk+vVP6trmH7sljHsfgA==', 'request_id': '1603530787,4960705a-965b-4d6b-9d1f-3ad3d72c6050'} ``` ## 百度智能云 ``` #人脸检测 # encoding:utf-8 import requests ​ # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 
'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=oAQNuqHp2iREGGiPmnBrlNbp&client_secret=e0kakc8XlswmW4bmplYe2SSQMrKMxMaE' response = requests.get(host) if response: print(response) response.json() {'refresh_token': '25.9adb34a8a046794e4cf345a258a61542.315360000.1918895896.282335-22870043', 'expires_in': 2592000, 'session_key': '9mzdWTk69JrDLm5efPNleA6FxmFPijWOSfUHpGcV06KFqqPXfCvVsOwpGPkgWaDaesAb1zIxltBxP/CQiPbC/UrQREJyUg==', 'access_token': '24.bc461ac1ea96b00a47a891199a3d6592.2592000.1606127896.282335-22870043', 'scope': 'public brain_all_scope vis-faceverify_faceverify_h5-face-liveness vis-faceverify_FACE_V3 vis-faceverify_idl_face_merge vis-faceverify_FACE_EFFECT vis-faceverify_face_feature_sdk wise_adapt lebo_resource_base lightservice_public hetu_basic lightcms_map_poi kaidian_kaidian ApsMisTest_Test权限 vis-classify_flower lpq_开放 cop_helloScope ApsMis_fangdi_permission smartapp_snsapi_base smartapp_mapp_dev_manage iop_autocar oauth_tp_app smartapp_smart_game_openapi oauth_sessionkey smartapp_swanid_verify smartapp_opensource_openapi smartapp_opensource_recapi fake_face_detect_开放Scope vis-ocr_虚拟人物助理 idl-video_虚拟人物助理 smartapp_component', 'session_secret': 'c55b692c56aa7fe4208bb13ce63bdbfa'} request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect" ​ params = "{\"image\":\"https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1603546140711&di=399a2525b6cc04645ad9cbe56a993b86&imgtype=0&src=http%3A%2F%2Fb-ssl.duitang.com%2Fuploads%2Fitem%2F201803%2F17%2F20180317100748_qclgn.jpg\",\"image_type\":\"URL\",\"face_field\":\"faceshape,facetype\"}" ​ access_token = '24.bc461ac1ea96b00a47a891199a3d6592.2592000.1606127896.282335-22870043' # 调用鉴权接口获取的token request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) response.json() {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 1599991012565, 'timestamp': 
1603536111, 'cached': 0, 'result': {'face_num': 1, 'face_list': [{'face_token': '21944693d8a5aeb22785ad7e2b906d7f', 'location': {'left': 390.87, 'top': 382.09, 'width': 192, 'height': 178, 'rotation': -15}, 'face_probability': 1, 'angle': {'yaw': 13.21, 'pitch': 13.65, 'roll': -21.04}, 'face_shape': {'type': 'oval', 'probability': 0.7}, 'face_type': {'type': 'human', 'probability': 1}}]}} # 人脸对比 request_url = "https://aip.baidubce.com/rest/2.0/face/v3/match" ​ params = "[{\"image\": \"https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1603548374556&di=572bd00cf25b2a9a175371c7b338d49c&imgtype=0&src=http%3A%2F%2Fdingyue.ws.126.net%2F2020%2F0422%2F7c636463j00q95ekh0049c000xc00m8m.jpg\", \"image_type\": \"URL\", \"face_type\": \"CERT\", \"quality_control\": \"LOW\"}, {\"image\": \"https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1603531227645&di=f4ba116b72f9c13d55fe0f7ed1a3e808&imgtype=0&src=http%3A%2F%2Fimgtmp.nos-jd.163yun.com%2Fimg%2Fc0VaMnFnaitYWW9SclFsMTArUE9pa1F2WVYyWG9vVW5XZUNzMGdmMWdnMWwweXRnQXR4RDZBPT0.jpg%3FimageView%26thumbnail%3D2160x0%26quality%3D90%26interlace%3D1%26type%3Djpg\", \"image_type\": \"URL\", \"face_type\": \"LIVE\", \"quality_control\": \"LOW\"}]" ​ access_token = '24.bc461ac1ea96b00a47a891199a3d6592.2592000.1606127896.282335-22870043' # 调用鉴权接口获取的token request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) response.json() {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 9455654510165, 'timestamp': 1603538832, 'cached': 0, 'result': {'score': 15.38564301, 'face_list': [{'face_token': 'ecba5a4527a0b6b42170fc65f5ce66fc'}, {'face_token': '94b16e36542180404f1c31dd57e850b5'}]}} # 创建用户组 request_url = "https://aip.baidubce.com/rest/2.0/face/v3/faceset/group/add" ​ params = "{\"group_id\":\"group3\"}" access_token = '24.bc461ac1ea96b00a47a891199a3d6592.2592000.1606127896.282335-22870043' # 
调用鉴权接口获取的token request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) response.json() {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 8989201252019, 'timestamp': 1603531449, 'cached': 0, 'result': None} # 人脸注册 request_url = "https://aip.baidubce.com/rest/2.0/face/v3/faceset/user/add" params = params = "{\"image\":\"https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1603548374556&di=572bd00cf25b2a9a175371c7b338d49c&imgtype=0&src=http%3A%2F%2Fdingyue.ws.126.net%2F2020%2F0422%2F7c636463j00q95ekh0049c000xc00m8m.jpg\",\"image_type\":\"URL\",\"group_id\":\"group3\",\"user_id\":\"user1\",\"user_info\":\"abc\",\"quality_control\":\"LOW\"}" request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response: print (response.json()) {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 1011589899910, 'timestamp': 1603539016, 'cached': 0, 'result': {'face_token': 'ecba5a4527a0b6b42170fc65f5ce66fc', 'location': {'left': 505.8, 'top': 231.53, 'width': 256, 'height': 237, 'rotation': 4}}} response.json() {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 3510194650010, 'timestamp': 1603531458, 'cached': 0, 'result': {'face_token': '94648755df8961a167c9193dc82ee6a4', 'location': {'left': 340.67, 'top': 277.64, 'width': 206, 'height': 216, 'rotation': 3}}} # 获取用户人脸列表 request_url = "https://aip.baidubce.com/rest/2.0/face/v3/faceset/face/getlist" ​ params = "{\"user_id\":\"user1\",\"group_id\":\"group3\"}" access_token = '24.bc461ac1ea96b00a47a891199a3d6592.2592000.1606127896.282335-22870043' # 调用鉴权接口获取的token request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) response.json() {'error_code': 0, 'error_msg': 
'SUCCESS', 'log_id': 6565847975259, 'timestamp': 1603539088, 'cached': 0, 'result': {'face_list': [{'face_token': 'ecba5a4527a0b6b42170fc65f5ce66fc', 'ctime': '2020-10-24 19:30:17'}]}} # 删除用户组 request_url = "https://aip.baidubce.com/rest/2.0/face/v3/faceset/group/delete" ​ params = "{\"group_id\":\"group3\"}" access_token = '24.bc461ac1ea96b00a47a891199a3d6592.2592000.1606127896.282335-22870043'# 调用鉴权接口获取的token request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) response.json() ​ {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 11599101946, 'timestamp': 1603539089, 'cached': 0, 'result': None} ``` ## 计算机视觉 1、 ``` import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt import json from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f" endpoint = "https://lzycomputervision.cognitiveservices.azure.com/" analyze_url = endpoint + "vision/v3.1/analyze" # Set image_url to the URL of an image that you want to analyze. image_url = "https://cdn.qetic.jp/wp-content/uploads/2020/04/03122633/music200403_lauv_2-1920x2878.jpeg" headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'visualFeatures': 'Categories,Description,Color'} data = {'url': image_url} response = requests.post(analyze_url, headers=headers, params=params, json=data) response.raise_for_status() # The 'analysis' object contains various fields that describe the image. The most # relevant caption for the image is obtained from the 'description' property. analysis = response.json() print(json.dumps(response.json())) image_caption = analysis["description"]["captions"][0]["text"].capitalize() # Display the image and overlay it with the caption. 
image = Image.open(BytesIO(requests.get(image_url).content)) plt.imshow(image) plt.axis("off") _ = plt.title(image_caption, size="x-large", y=-0.1) plt.show() ``` 2、 ``` import os import sys import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f" endpoint = "https://lzycomputervision.cognitiveservices.azure.com/" analyze_url = endpoint + "vision/v3.1/analyze" # Set image_path to the local path of an image that you want to analyze. # Sample images are here, if needed: # https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/ComputerVision/Images image_path = "C:/Users/Zzzing/Pictures/Camera Roll/bobopool.jpg" # Read the image into a byte array image_data = open(image_path, "rb").read() headers = {'Ocp-Apim-Subscription-Key': subscription_key, 'Content-Type': 'application/octet-stream'} params = {'visualFeatures': 'Categories,Description,Color'} response = requests.post( analyze_url, headers=headers, params=params, data=image_data) response.raise_for_status() # The 'analysis' object contains various fields that describe the image. The most # relevant caption for the image is obtained from the 'description' property. analysis = response.json() print(analysis) image_caption = analysis["description"]["captions"][0]["text"].capitalize() # Display the image and overlay it with the caption. image = Image.open(BytesIO(image_data)) plt.imshow(image) plt.axis("off") _ = plt.title(image_caption, size="x-large", y=-0.1) plt.show() ``` 3、 ``` import os import sys import requests # If you are using a Jupyter notebook, uncomment the following lines. 
# %matplotlib inline # import matplotlib.pyplot as plt from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f" endpoint = "https://lzycomputervision.cognitiveservices.azure.com/" thumbnail_url = endpoint + "vision/v3.1/generateThumbnail" # Set image_url to the URL of an image that you want to analyze. image_url = "https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1603559122087&di=d65ae51b7878a865b26e87f14071d7e3&imgtype=0&src=http%3A%2F%2Fn.sinaimg.cn%2Fsinacn15%2F275%2Fw640h435%2F20181010%2Fcaba-hkrzvkw4936632.jpg" # Construct URL headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'width': '50', 'height': '50', 'smartCropping': 'true'} data = {'url': image_url} # Call API response = requests.post(thumbnail_url, headers=headers, params=params, json=data) response.raise_for_status() # Open the image from bytes thumbnail = Image.open(BytesIO(response.content)) # Verify the thumbnail size. print("Thumbnail is {0}-by-{1}".format(*thumbnail.size)) # Save thumbnail to file thumbnail.save('thumbnail.png') # Display image thumbnail.show() # Optional. Display the thumbnail from Jupyter. # plt.imshow(thumbnail) # plt.axis("off") ``` 4、 ``` import json import os import sys import requests import time # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.patches import Polygon from PIL import Image from io import BytesIO missing_env = False # Add your Computer Vision subscription key and endpoint to your environment variables. subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f" endpoint = "https://lzycomputervision.cognitiveservices.azure.com/" text_recognition_url = endpoint + "/vision/v3.1/read/analyze" # Set image_url to the URL of an image that you want to recognize. 
image_url = "https://ww2.sinaimg.cn/bmiddle/8206a628gw1f9ii2qu7p5j20hs0hsgor.jpg" headers = {'Ocp-Apim-Subscription-Key': subscription_key} data = {'url': image_url} response = requests.post( text_recognition_url, headers=headers, json=data) response.raise_for_status() # Extracting text requires two API calls: One call to submit the # image for processing, the other to retrieve the text found in the image. # Holds the URI used to retrieve the recognized text. operation_url = response.headers["Operation-Location"] # The recognized text isn't immediately available, so poll to wait for completion. analysis = {} poll = True while (poll): response_final = requests.get( response.headers["Operation-Location"], headers=headers) analysis = response_final.json() print(json.dumps(analysis, indent=4)) time.sleep(1) if ("analyzeResult" in analysis): poll = False if ("status" in analysis and analysis['status'] == 'failed'): poll = False polygons = [] if ("analyzeResult" in analysis): # Extract the recognized text, with bounding boxes. polygons = [(line["boundingBox"], line["text"]) for line in analysis["analyzeResult"]["readResults"][0]["lines"]] # Display the image and overlay it with the extracted text. image = Image.open(BytesIO(requests.get(image_url).content)) ax = plt.imshow(image) for polygon in polygons: vertices = [(polygon[0][i], polygon[0][i+1]) for i in range(0, len(polygon[0]), 2)] text = polygon[1] patch = Polygon(vertices, closed=True, fill=False, linewidth=2, color='y') ax.axes.add_patch(patch) plt.text(vertices[0][0], vertices[0][1], text, fontsize=20, va="top") plt.show() ``` 5、 ``` import os import sys import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. 
subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f"
endpoint = "https://lzycomputervision.cognitiveservices.azure.com/"
ocr_url = endpoint + "vision/v3.1/ocr"

# Set image_url to the URL of an image that you want to analyze.
image_url = "https://n1image.hjfile.cn/mh/2017/10/25/0cd5d8f9df976763c923e538efa1132b.png"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'language': 'unk', 'detectOrientation': 'true'}
data = {'url': image_url}
response = requests.post(ocr_url, headers=headers, params=params, json=data)
response.raise_for_status()

analysis = response.json()

# Extract the word bounding boxes and text.
line_infos = [region["lines"] for region in analysis["regions"]]
word_infos = []
for line in line_infos:
    for word_metadata in line:
        for word_info in word_metadata["words"]:
            word_infos.append(word_info)
word_infos

# Display the image and overlay it with the extracted text.
plt.figure(figsize=(5, 5))
image = Image.open(BytesIO(requests.get(image_url).content))
ax = plt.imshow(image, alpha=0.5)
for word in word_infos:
    bbox = [int(num) for num in word["boundingBox"].split(",")]
    text = word["text"]
    origin = (bbox[0], bbox[1])
    patch = Rectangle(origin, bbox[2], bbox[3], fill=False, linewidth=2, color='y')
    ax.axes.add_patch(patch)
    plt.text(origin[0], origin[1], text, fontsize=20, weight="bold", va="top")
# FIX: axis("off") must run before show() — the original called it afterwards,
# where it had no effect on the displayed figure.
plt.axis("off")
plt.show()
```

6.1、

```
import os
import sys
import requests

# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO

# Add your Computer Vision subscription key and endpoint to your environment variables.
subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f"
endpoint = "https://lzycomputervision.cognitiveservices.azure.com/"
landmark_analyze_url = endpoint + "vision/v3.1/models/landmarks/analyze"

# Set image_url to the URL of an image that you want to analyze.
image_url = "http://scenery.image.nihaowang.com/scenery/150/474/201106082241229687.jpg"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'model': 'landmarks'}
data = {'url': image_url}
response = requests.post(
    landmark_analyze_url, headers=headers, params=params, json=data)
response.raise_for_status()

# The 'analysis' object contains various fields that describe the image. The
# most relevant landmark for the image is obtained from the 'result' property.
analysis = response.json()
# FIX: `is not []` compares identity against a brand-new list literal and is
# therefore ALWAYS True; assert truthiness so an empty result actually fails here
# instead of raising IndexError on the subscript below.
assert analysis["result"]["landmarks"]
print(analysis)
landmark_name = analysis["result"]["landmarks"][0]["name"].capitalize()

# Display the image and overlay it with the landmark name.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(landmark_name, size="x-large", y=-0.1)
plt.show()
```

6、2

```
import requests

# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO

# Replace with your valid subscription key.
subscription_key = "0edfadf16cda4f44ae5bca3c4ef52e8f"
assert subscription_key

vision_base_url = "https://lzycomputervision.cognitiveservices.azure.com/vision/v2.1/"
celebrity_analyze_url = vision_base_url + "models/celebrities/analyze"

# Set image_url to the URL of an image that you want to analyze.
image_url = "https://pic.36krcnd.com/avatar/201611/11092316/nlp5ymf1ett5aasa.jpg"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'model': 'celebrities'}
data = {'url': image_url}
response = requests.post(
    celebrity_analyze_url, headers=headers, params=params, json=data)
response.raise_for_status()

# The 'analysis' object contains various fields that describe the image. The
# most relevant celebrity for the image is obtained from the 'result' property.
analysis = response.json()
# FIX: `is not []` is an identity check against a fresh list literal and is
# ALWAYS True; assert truthiness so an empty result fails here rather than
# raising IndexError on the subscript below.
assert analysis["result"]["celebrities"]
print(analysis)
celebrity_name = analysis["result"]["celebrities"][0]["name"].capitalize()

# Display the image and overlay it with the celebrity name.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(celebrity_name, size="x-large", y=-0.1)
plt.show()
```

## 学习心得

在学习API的过程中,看得出其实对我们的代码能力要求并不是特别的高,但是我们的基础知识本来懂的就不多,对英文的要求也有一定的难度。

其中,最最最最最最最最需要的是耐心,在运行过程中需要高度集中,但由于有很多新情况是我们没见过的,需要我们分心去找问题,弄懂再倒回来看自己的代码有时真的会晕。

还有总是40404040404040404400400400400报错报错真的会让人很恼火。

希望error不要磨灭我的耐心还有希望自己有足够的精力自主学习。