pip fails to launch on Windows

This happens especially when multiple Python versions coexist: after editing the Windows environment variables, pip often fails to launch. When it does, not only pip but every script under Python/Scripts refuses to start, with the following error:

Fatal error in launcher: Unable to create process using '"'

The root cause is actually quite simple: the pip launcher cannot find the python.exe executable; you can confirm this by reading pip's source. Several approaches solve the problem:

1. Environment-variable method, best for a single-Python setup
Adding the path to python.exe to the PATH environment variable solves the problem.

2. Launch-script method, suited to multiple Python environments

set PATH=PATH_TO_PYTHON\;PATH_TO_PYTHON\Scripts;%PATH%
python -m pip install XXX

3. Using method 1 or 2, upgrade pip; the reinstall rewrites the launcher, fixing the problem (best for a single-Python environment):

python -m pip install --upgrade pip

4. Patch the pip binary (see the sketch after this list)
Open pip.exe in a hex editor,
fix the embedded python.exe path,
and save.

5. Patch the pip binary with a PE editor
Same as method 4.

6. Unpack the launcher
Extract pip.exe with an archive tool (it carries a zip payload),
take out __main__.py,
rename it to pip.py,
and run it directly:

python pip.py install XXX
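
Methods 4 and 5 can also be scripted. pip.exe is a launcher stub with a shebang-style interpreter path and a zip archive appended to it; the sketch below assumes that layout, and the OLD/NEW paths are hypothetical placeholders for your installation:

#patch_pip_launcher.py -- a minimal sketch of method 4 done in code
#Assumes pip.exe embeds the interpreter path as a '#!...python.exe' line;
#OLD and NEW are hypothetical paths -- adjust them to your machine.
OLD = b'C:\\OldPython\\python.exe'
NEW = b'C:\\Python27\\python.exe'

with open('pip.exe', 'rb') as f:
    data = f.read()

if OLD not in data:
    raise SystemExit('old interpreter path not found in pip.exe')

#Write a patched copy and keep the original as a backup
with open('pip.patched.exe', 'wb') as f:
    f.write(data.replace(OLD, NEW))
print('wrote pip.patched.exe')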

Fetching JSON page content with Python

# -*- coding: UTF-8 -*-
'''
Created on 20150206

@author: Hansen
'''

import urllib2
import sys
import io
import json

#Fetch HTML from URL
def fetch_html(index,url,keepHtml,resultFile):
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0')
    rsp = urllib2.urlopen(req)
    content = rsp.read()
    #receive_header = rsp.info()
    #print(sys.getfilesystemencoding())
    #content = content.decode('utf-8','replace')
    
    if keepHtml:
        fileinfo = open(str(index)+'.html','w')
        fileinfo.write(content)
        fileinfo.close()
        print("save file "+ str(index)+'.html:    ok')
        
    parse_json(content,resultFile)
    
#Parse JSON
def parse_json(content,resultFile):
    jsonData = json.loads(content)
    shops = jsonData['shopBeans']
    print(len(shops))
    for shop in shops:
        szTitle = shop['filterFullName']
        szTitle = szTitle.replace("\r\n", "-").replace(" ","")
        szStar = shop['shopPowerTitle']
        szMeanPrice = str(shop['avgPrice'])
        szMeanPrice = szMeanPrice.replace("\n", "").replace(" ","")
        szAddressA = shop['mainRegionName']
        szAddressB = shop['address']
        szAddress = (szAddressA+"-"+szAddressB).replace("\r\n", "-").replace(" ","")
        szTaste = shop['refinedScore1']
        szEvn = shop['refinedScore2']
        szService = shop['refinedScore3']
        
        fileinfo = io.open(resultFile,'a',encoding='utf_16')
        fileinfo.write(szTitle+","+szStar+","+szMeanPrice+","+szAddress+","+szTaste+","+szEvn+","+szService+"\n")
        fileinfo.close()
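
A minimal driver for the functions above; the URL and file name are hypothetical placeholders:

if __name__ == '__main__':
    #Hypothetical endpoint returning the JSON described above; substitute the real URL
    fetch_html(1, 'http://example.com/shops.json', True, 'result.csv')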

Scraping page content with Python

1. Parsing the page with BeautifulSoup

'''
Created on 20150203
@author: Hansen
'''

import urllib2
import sys
import io
from bs4 import BeautifulSoup

#Fetch HTML from URL
def fetch_html(index,url,keepHtml,resultFile):
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0')
    rsp = urllib2.urlopen(req)
    content = rsp.read()
    #receive_header = rsp.info()
    #print(sys.getfilesystemencoding())
    #content = content.decode('utf-8','replace')
    
    if keepHtml:
        fileinfo = open(str(index)+'.html','w')
        fileinfo.write(content)
        fileinfo.close()
        print("save file "+ str(index)+'.html:    ok')
        
    parse_html(content,resultFile)
    
#Parse HTML
def parse_html(html,resultFile):
    soup = BeautifulSoup(html,from_encoding="utf8")
    lis = soup.select('div.shop-all-list li')
    print(len(lis))
    for li in lis:
        szTitle = (li.select('div:nth-of-type(2) div:nth-of-type(1) a h4'))[0].get_text()
        szTitle = szTitle.replace("\r\n", "-").replace(" ","")
        szStar = (li.select('div:nth-of-type(2) div:nth-of-type(3) span'))[0]['title']
        szReviewNum = (li.select('div:nth-of-type(2) div:nth-of-type(3) a:nth-of-type(1)'))[0].get_text()
        szReviewNum = szReviewNum.replace("\n", "").replace(" ","")
        szMeanPrice = (li.select('div:nth-of-type(2) div:nth-of-type(3) a:nth-of-type(2)'))[0].get_text()
        szMeanPrice = szMeanPrice.replace("\n", "").replace(" ","")
        szCategory = (li.select('div:nth-of-type(2) div:nth-of-type(4) a:nth-of-type(1)'))[0].get_text()
        szAddressA = (li.select('div:nth-of-type(2) div:nth-of-type(4) a:nth-of-type(2)'))[0].get_text()
        szAddressB = (li.select('div:nth-of-type(2) div:nth-of-type(4) span:nth-of-type(3)'))[0].get_text()
        szAddress = (szAddressA+"-"+szAddressB).replace("\r\n", "-").replace(" ","")
        szTaste = (li.select('div:nth-of-type(2) span:nth-of-type(5) span:nth-of-type(1)'))[0].get_text()
        szEvn = (li.select('div:nth-of-type(2) span:nth-of-type(5) span:nth-of-type(2)'))[0].get_text()
        szService = (li.select('div:nth-of-type(2) span:nth-of-type(5) span:nth-of-type(3)'))[0].get_text()
        
        fileinfo = io.open(resultFile,'a',encoding='utf_16')
        fileinfo.write(szTitle+","+szStar+","+szReviewNum+","+szMeanPrice+","+szCategory+","+szAddress+","+szTaste+","+szEvn+","+szService+"\n")
        fileinfo.close()
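
The nth-of-type chains above are pinned to one exact page layout and break on any markup change. Where the page exposes stable class names (the PyQuery version below leans on .sml-rank-stars, .review-num and .mean-price), class-based selectors are sturdier; a hypothetical sketch reusing those names inside the same loop:

        #Hypothetical class-based alternatives to the positional selectors above
        szStar = li.select('span.sml-rank-stars')[0]['title']
        szReviewNum = li.select('a.review-num')[0].get_text()
        szMeanPrice = li.select('a.mean-price')[0].get_text()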

2. Parsing the page with PyQuery

'''
Created on 20150203
@author: Hansen
'''

import urllib2
import sys
import io
from pyquery import PyQuery

#Fetch HTML from URL
def fetch_html(index,url,keepHtml,resultFile):
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0')
    rsp = urllib2.urlopen(req)
    content = rsp.read()
    #receive_header = rsp.info()
    #print(sys.getfilesystemencoding())
    #content = content.decode('utf-8','replace')
    
    if keepHtml:
        fileinfo = open(str(index)+'.html','w')
        fileinfo.write(content)
        fileinfo.close()
        print("save file "+ str(index)+'.html:    ok')
        
    parse_html(content,resultFile)
    
#Parse HTML
def parse_html(html,resultFile):
    doc = PyQuery(html)
    lis = doc('div.shop-all-list li')
    print(len(lis))
    for li in lis:
        li_doc = PyQuery(li)
        szTitle = li_doc('li div div a h4').text()
        szTitle = szTitle.replace("\r\n", "-").replace(" ","")
        szStar = li_doc("li div div span").filter('.sml-rank-stars').attr('title')
        szReviewNum = li_doc('li div div a').filter('.review-num').text()
        szReviewNum = szReviewNum.replace("\n", "").replace(" ","")
        szMeanPrice = li_doc('li div div a').filter('.mean-price').text()
        szMeanPrice = szMeanPrice.replace("\n", "").replace(" ","")
        szCategory = li_doc('li div div a span').filter('.tag').eq(0).text()  #eq(0): first .tag is the category, matching the BeautifulSoup version
        szAddressA = li_doc('li div div a span').filter('.tag').eq(1).text()
        szAddressB = li_doc('li div div span').filter('.addr').eq(0).text()
        szAddress = (szAddressA+"-"+szAddressB).replace("\r\n", "-").replace(" ","")
        szTaste = li_doc('li div span span').eq(0).text()
        szEvn = li_doc('li div span span').eq(1).text()
        szService = li_doc('li div span span').eq(2).text()
        
        fileinfo = io.open(resultFile,'a',encoding='utf_16')
        fileinfo.write(szTitle+","+szStar+","+szReviewNum+","+szMeanPrice+","+szCategory+","+szAddress+","+szTaste+","+szEvn+","+szService+"\n")
        fileinfo.close()
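
Either parser variant is driven through the same fetch_html entry point; a small loop over a few result pages, with a hypothetical URL pattern:

if __name__ == '__main__':
    for page in range(1, 4):
        #Hypothetical paginated search URL; substitute the real one
        fetch_html(page, 'http://example.com/search/p%d' % page, True, 'result.csv')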

Using the MySQL driver from Python

#!/usr/bin/python
# -*- coding: utf-8 -*-

import MySQLdb

db=MySQLdb.connect(host="127.0.0.1",port=3306,db="django",user="sa",passwd="sa")
cur=db.cursor()
cur.execute("select count(*) from djuser")
print("rowcount=",cur.rowcount)

rows=cur.fetchall()
for row in rows:
    print("%s" % (row[0]))

cur.close()
db.close()

Using ODBC from Python

#!/usr/bin/python
# -*- coding: utf-8 -*-

import ceODBC
con=ceODBC.connect('driver=MySQL ODBC 5.1 Driver;server=127.0.0.1;port=3306;database=django;uid=sa;pwd=sa;')
cur=con.cursor()
cur.execute("SELECT count(*) FROM djuser")
rows=cur.fetchall()

for row in rows:
    print(row[0])

cur.close()
con.close()
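
pyodbc is a widely used alternative ODBC binding with the same DB-API shape; a sketch of the same query, assuming the same driver name and credentials as above:

import pyodbc

con = pyodbc.connect('DRIVER={MySQL ODBC 5.1 Driver};SERVER=127.0.0.1;PORT=3306;'
                     'DATABASE=django;UID=sa;PWD=sa')
cur = con.cursor()
cur.execute("SELECT count(*) FROM djuser")
print(cur.fetchone()[0])
con.close()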

Calling a DLL from Python

1.Test.h

#ifndef TEST_INTADD_HEADER
#define TEST_INTADD_HEADER

extern "C" int WINAPIV IntAdd(int a,int b);

#endif

2.Test.cpp

#include <windows.h>
#include "Test.h"

BOOL WINAPI DllMain(HINSTANCE hinstDLL,DWORD fdwReason,LPVOID lpReserved)
{
  UNREFERENCED_PARAMETER(hinstDLL);
  UNREFERENCED_PARAMETER(lpReserved);

  switch(fdwReason) 
  { 
    case DLL_PROCESS_ATTACH:
      break;

    case DLL_THREAD_ATTACH:
      break;

    case DLL_THREAD_DETACH:
      break;

    case DLL_PROCESS_DETACH:
      break;
  }

  return TRUE;
}

extern "C" int WINAPIV IntAdd(int a,int b)
{
  return a+b;
}

3.Test.def

LIBRARY	"Test"

EXPORTS
	IntAdd
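
One way to build the DLL with the MSVC command-line tools (run from a Visual Studio command prompt, with the three files above in one directory):

cl /LD Test.cpp /link /DEF:Test.def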

4.test_cdll.py

#test_cdll.py
#Use the __cdecl calling convention (ctypes.cdll), not __stdcall (ctypes.windll)

from ctypes import *

fileName="Test.dll"
Test=cdll.LoadLibrary(fileName)
print(Test.IntAdd(2,3))
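
ctypes assumes C int for arguments and results by default, which happens to match IntAdd; declaring the signature explicitly documents the call and lets ctypes reject bad arguments:

from ctypes import cdll, c_int

Test = cdll.LoadLibrary("Test.dll")
Test.IntAdd.argtypes = (c_int, c_int)   #parameter types
Test.IntAdd.restype = c_int             #return type
print(Test.IntAdd(2, 3))                #prints 5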