|  |  |  |
|---|---|---|
| author | Lucas De Marchi <lucas.demarchi@profusion.mobi> | 2011-03-30 22:57:33 -0300 |
| committer | Lucas De Marchi <lucas.demarchi@profusion.mobi> | 2011-03-31 11:26:23 -0300 |
| commit | 25985edcedea6396277003854657b5f3cb31a628 (patch) | |
| tree | f026e810210a2ee7290caeb737c23cb6472b7c38 /net | |
| parent | 6aba74f2791287ec407e0f92487a725a25908067 (diff) | |
Fix common misspellings
Fixes generated by 'codespell' and manually reviewed.
Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
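A sweep like this can be reproduced with codespell itself on a kernel checkout; a minimal sketch follows (the flags shown are from a current codespell release, not necessarily the 2011 version used for this commit):

```sh
# Run codespell over net/ and write the suggested fixes in place.
# -w / --write-changes applies corrections directly to the files;
# the result still needs manual review, since codespell can trip
# over identifiers, protocol names, and deliberate spellings.
codespell --write-changes net/
```

The hunks below are the resulting one-word corrections, mostly in comments and log strings.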
Diffstat (limited to 'net')
103 files changed, 160 insertions, 160 deletions
| diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index d1314cf18adf..d940c49d168a 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -54,7 +54,7 @@ static const char name_conf[]	 = "config";  /*   *	Structures for interfacing with the /proc filesystem. - *	VLAN creates its own directory /proc/net/vlan with the folowing + *	VLAN creates its own directory /proc/net/vlan with the following   *	entries:   *	config		device status/configuration   *	<device>	entry for each  device diff --git a/net/9p/client.c b/net/9p/client.c index 2ccbf04d37df..48b8e084e710 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -178,7 +178,7 @@ free_and_return:   * @tag: numeric id for transaction   *   * this is a simple array lookup, but will grow the - * request_slots as necessary to accomodate transaction + * request_slots as necessary to accommodate transaction   * ids which did not previously have a slot.   *   * this code relies on the client spinlock to manage locks, its diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index 9172ab78fcb0..d47880e971dd 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c @@ -36,7 +36,7 @@ p9_release_req_pages(struct trans_rpage_info *rpinfo)  EXPORT_SYMBOL(p9_release_req_pages);  /** - * p9_nr_pages - Return number of pages needed to accomodate the payload. + * p9_nr_pages - Return number of pages needed to accommodate the payload.   */  int  p9_nr_pages(struct p9_req_t *req) @@ -55,7 +55,7 @@ EXPORT_SYMBOL(p9_nr_pages);   * @req: Request to be sent to server.   * @pdata_off: data offset into the first page after translation (gup).   * @pdata_len: Total length of the IO. gup may not return requested # of pages. - * @nr_pages: number of pages to accomodate the payload + * @nr_pages: number of pages to accommodate the payload   * @rw: Indicates if the pages are for read or write.   */  int diff --git a/net/9p/util.c b/net/9p/util.c index b84619b5ba22..da6af81e59d9 100644 --- a/net/9p/util.c +++ b/net/9p/util.c @@ -67,7 +67,7 @@ EXPORT_SYMBOL(p9_idpool_create);  /**   * p9_idpool_destroy - create a new per-connection id pool - * @p: idpool to destory + * @p: idpool to destroy   */  void p9_idpool_destroy(struct p9_idpool *p) diff --git a/net/atm/br2684.c b/net/atm/br2684.c index fce2eae8d476..2252c2085dac 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -509,7 +509,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)  	write_lock_irq(&devs_lock);  	net_dev = br2684_find_dev(&be.ifspec);  	if (net_dev == NULL) { -		pr_err("tried to attach to non-existant device\n"); +		pr_err("tried to attach to non-existent device\n");  		err = -ENXIO;  		goto error;  	} diff --git a/net/atm/lec.h b/net/atm/lec.h index 9d14d196cc1d..dfc071966463 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -35,7 +35,7 @@ struct lecdatahdr_8025 {   * Operations that LANE2 capable device can do. Two first functions   * are used to make the device do things. See spec 3.1.3 and 3.1.4.   * - * The third function is intented for the MPOA component sitting on + * The third function is intended for the MPOA component sitting on   * top of the LANE device. The MPOA component assigns it's own function   * to (*associate_indicator)() and the LANE device will use that   * function to tell about TLVs it sees floating through. 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 9ed26140a269..824e1f6e50f2 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -474,7 +474,7 @@ void interface_rx(struct net_device *soft_iface,  		goto dropped;  	skb->protocol = eth_type_trans(skb, soft_iface); -	/* should not be neccesary anymore as we use skb_pull_rcsum() +	/* should not be necessary anymore as we use skb_pull_rcsum()  	 * TODO: please verify this and remove this TODO  	 * -- Dec 21st 2009, Simon Wunderlich */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index b372fb8bcdcf..42d5ff02cb59 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1877,7 +1877,7 @@ static void hci_tx_task(unsigned long arg)  	read_unlock(&hci_task_lock);  } -/* ----- HCI RX task (incoming data proccessing) ----- */ +/* ----- HCI RX task (incoming data processing) ----- */  /* ACL data packet */  static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index fc85e7ae33c7..36b9c5d0ebe3 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -679,7 +679,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch  		if (opt == BT_FLUSHABLE_OFF) {  			struct l2cap_conn *conn = l2cap_pi(sk)->conn; -			/* proceed futher only when we have l2cap_conn and +			/* proceed further only when we have l2cap_conn and  			   No Flush support in the LM */  			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {  				err = -EINVAL; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 88485cc74dc3..cc4d3c5ab1c6 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -169,7 +169,7 @@ void br_fdb_flush(struct net_bridge *br)  	spin_unlock_bh(&br->hash_lock);  } -/* Flush all entries refering to a specific port. +/* Flush all entries referring to a specific port.   * if do_all is set also flush static entries   */  void br_fdb_delete_by_port(struct net_bridge *br, diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index cb43312b846e..3d9fca0e3370 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -106,7 +106,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)  /*   * Legacy ioctl's through SIOCDEVPRIVATE   * This interface is deprecated because it was too difficult to - * to do the translation for 32/64bit ioctl compatability. + * to do the translation for 32/64bit ioctl compatibility.   
*/  static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)  { diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 8184c031d028..37a4034dfc29 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -852,7 +852,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,  	sock->state = SS_CONNECTING;  	sk->sk_state = CAIF_CONNECTING; -	/* Check priority value comming from socket */ +	/* Check priority value coming from socket */  	/* if priority value is out of range it will be ajusted */  	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)  		cf_sk->conn_req.priority = CAIF_PRIO_MAX; diff --git a/net/can/bcm.c b/net/can/bcm.c index 871a0ad51025..57b1aed79014 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -387,7 +387,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)  }  /* - * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions + * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions   */  static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)  { diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 02212ed50852..8d4ee7e01793 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -917,7 +917,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);  /*   * Pick an osd (the first 'up' osd in the pg), allocate the osd struct   * (as needed), and set the request r_osd appropriately.  If there is - * no up osd, set r_osd to NULL.  Move the request to the appropiate list + * no up osd, set r_osd to NULL.  Move the request to the appropriate list   * (unsent, homeless) or leave on in-flight lru.   *   * Return 0 if unchanged, 1 if changed, or negative on error. diff --git a/net/core/dev.c b/net/core/dev.c index 563ddc28139d..56c3e00098c0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2071,7 +2071,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,  		u32 features;  		/* -		 * If device doesnt need skb->dst, release it right now while +		 * If device doesn't need skb->dst, release it right now while  		 * its hot in this cpu cache  		 */  		if (dev->priv_flags & IFF_XMIT_DST_RELEASE) @@ -2131,7 +2131,7 @@ gso:  		nskb->next = NULL;  		/* -		 * If device doesnt need nskb->dst, release it right now while +		 * If device doesn't need nskb->dst, release it right now while  		 * its hot in this cpu cache  		 */  		if (dev->priv_flags & IFF_XMIT_DST_RELEASE) @@ -2950,8 +2950,8 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);   * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions   * a compare and 2 stores extra right now if we dont have it on   * but have CONFIG_NET_CLS_ACT - * NOTE: This doesnt stop any functionality; if you dont have - * the ingress scheduler, you just cant add policies on ingress. + * NOTE: This doesn't stop any functionality; if you dont have + * the ingress scheduler, you just can't add policies on ingress.   *   */  static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) @@ -3780,7 +3780,7 @@ static void net_rx_action(struct softirq_action *h)  		 * with netpoll's poll_napi().  Only the entity which  		 * obtains the lock and sees NAPI_STATE_SCHED set will  		 * actually make the ->poll() call.  Therefore we avoid -		 * accidently calling ->poll() when NAPI is not scheduled. +		 * accidentally calling ->poll() when NAPI is not scheduled.  		 
*/  		work = 0;  		if (test_bit(NAPI_STATE_SCHED, &n->state)) { @@ -6316,7 +6316,7 @@ static void __net_exit default_device_exit(struct net *net)  		if (dev->rtnl_link_ops)  			continue; -		/* Push remaing network devices to init_net */ +		/* Push remaining network devices to init_net */  		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);  		err = dev_change_net_namespace(dev, &init_net, fb_name);  		if (err) { diff --git a/net/core/filter.c b/net/core/filter.c index 232b1873bb28..afb8afb066bb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -425,7 +425,7 @@ EXPORT_SYMBOL(sk_run_filter);   * As we dont want to clear mem[] array for each packet going through   * sk_run_filter(), we check that filter loaded by user never try to read   * a cell if not previously written, and we check all branches to be sure - * a malicious user doesnt try to abuse us. + * a malicious user doesn't try to abuse us.   */  static int check_load_and_stores(struct sock_filter *filter, int flen)  { diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 01a1101b5936..a7b342131869 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -129,7 +129,7 @@ static void linkwatch_schedule_work(int urgent)  	if (!cancel_delayed_work(&linkwatch_work))  		return; -	/* Otherwise we reschedule it again for immediate exection. */ +	/* Otherwise we reschedule it again for immediate execution. */  	schedule_delayed_work(&linkwatch_work, 0);  } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 49f7ea5b4c75..d7c4bb4b1820 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -196,7 +196,7 @@ EXPORT_SYMBOL_GPL(__rtnl_register);   * as failure of this function is very unlikely, it can only happen due   * to lack of memory when allocating the chain to store all message   * handlers for a protocol. Meant for use in init functions where lack - * of memory implies no sense in continueing. + * of memory implies no sense in continuing.   */  void rtnl_register(int protocol, int msgtype,  		   rtnl_doit_func doit, rtnl_dumpit_func dumpit) @@ -1440,7 +1440,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,  errout:  	if (err < 0 && modified && net_ratelimit())  		printk(KERN_WARNING "A link change request failed with " -		       "some changes comitted already. Interface %s may " +		       "some changes committed already. Interface %s may "  		       "have been left with an inconsistent configuration, "  		       "please check.\n", dev->name); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 801dd08908f9..7ebeed0a877c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2267,7 +2267,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);   * of bytes already consumed and the next call to   * skb_seq_read() will return the remaining part of the block.   * - * Note 1: The size of each block of data returned can be arbitary, + * Note 1: The size of each block of data returned can be arbitrary,   *       this limitation is the cost for zerocopy seqeuental   *       reads of potentially non linear data.   
* diff --git a/net/core/sock.c b/net/core/sock.c index 7dfed792434d..6e819780c232 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -215,7 +215,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;  __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;  __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; -/* Maximal space eaten by iovec or ancilliary data plus some space */ +/* Maximal space eaten by iovec or ancillary data plus some space */  int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);  EXPORT_SYMBOL(sysctl_optmem_max); @@ -1175,7 +1175,7 @@ static void __sk_free(struct sock *sk)  void sk_free(struct sock *sk)  {  	/* -	 * We substract one from sk_wmem_alloc and can know if +	 * We subtract one from sk_wmem_alloc and can know if  	 * some packets are still in some tx queue.  	 * If not null, sock_wfree() will call __sk_free(sk) later  	 */ @@ -1185,10 +1185,10 @@ void sk_free(struct sock *sk)  EXPORT_SYMBOL(sk_free);  /* - * Last sock_put should drop referrence to sk->sk_net. It has already - * been dropped in sk_change_net. Taking referrence to stopping namespace + * Last sock_put should drop reference to sk->sk_net. It has already + * been dropped in sk_change_net. Taking reference to stopping namespace   * is not an option. - * Take referrence to a socket to remove it from hash _alive_ and after that + * Take reference to a socket to remove it from hash _alive_ and after that   * destroy it in the context of init_net.   */  void sk_release_kernel(struct sock *sk) diff --git a/net/dccp/output.c b/net/dccp/output.c index 784d30210543..136d41cbcd02 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -143,7 +143,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)  }  /** - * dccp_determine_ccmps  -  Find out about CCID-specfic packet-size limits + * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits   * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),   * since the RX CCID is restricted to feedback packets (Acks), which are small   * in comparison with the data traffic. A value of 0 means "no current CCMPS". diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c index bb2b41bc854e..d951f93644bf 100644 --- a/net/dsa/mv88e6131.c +++ b/net/dsa/mv88e6131.c @@ -124,7 +124,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)  	 * Ignore removed tag data on doubly tagged packets, disable  	 * flow control messages, force flow control priority to the  	 * highest, and send all special multicast frames to the CPU -	 * port at the higest priority. +	 * port at the highest priority.  	 */  	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 094e150c6260..a0af7ea87870 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -112,7 +112,7 @@ int cipso_v4_rbm_strictvalid = 1;  /* The maximum number of category ranges permitted in the ranged category tag   * (tag #5).  You may note that the IETF draft states that the maximum number   * of category ranges is 7, but if the low end of the last category range is - * zero then it is possibile to fit 8 category ranges because the zero should + * zero then it is possible to fit 8 category ranges because the zero should   * be omitted. */  #define CIPSO_V4_TAG_RNG_CAT_MAX      8 @@ -438,7 +438,7 @@ cache_add_failure:   *   * Description:   * Search the DOI definition list for a DOI definition with a DOI value that - * matches @doi.  
The caller is responsibile for calling rcu_read_[un]lock(). + * matches @doi.  The caller is responsible for calling rcu_read_[un]lock().   * Returns a pointer to the DOI definition on success and NULL on failure.   */  static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) @@ -1293,7 +1293,7 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,  			return ret_val;  		/* This will send packets using the "optimized" format when -		 * possibile as specified in  section 3.4.2.6 of the +		 * possible as specified in  section 3.4.2.6 of the  		 * CIPSO draft. */  		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)  			tag_len = 14; @@ -1752,7 +1752,7 @@ validate_return:  }  /** - * cipso_v4_error - Send the correct reponse for a bad packet + * cipso_v4_error - Send the correct response for a bad packet   * @skb: the packet   * @error: the error code   * @gateway: CIPSO gateway flag diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index b92c86f6e9b3..e9013d6c1f51 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -12,7 +12,7 @@   *   *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet   * - * This work is based on the LPC-trie which is originally descibed in: + * This work is based on the LPC-trie which is originally described in:   *   * An experimental study of compression methods for dynamic tries   * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index a91dc1611081..e5f8a71d3a2a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -704,7 +704,7 @@ static void icmp_unreach(struct sk_buff *skb)  	 */  	/* -	 *	Check the other end isnt violating RFC 1122. Some routers send +	 *	Check the other end isn't violating RFC 1122. Some routers send  	 *	bogus responses to broadcast frames. If you see this message  	 *	first check your netmask matches at both ends, if it does then  	 *	get the other vendor to fix their kit. diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 67f241b97649..459c011b1d4a 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -603,7 +603,7 @@ slow_path:  		/* IF: it doesn't fit, use 'mtu' - the data space left */  		if (len > mtu)  			len = mtu; -		/* IF: we are not sending upto and including the packet end +		/* IF: we are not sending up to and including the packet end  		   then align the next start on an eight byte boundary */  		if (len < left)	{  			len &= ~7; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 2b097752426b..cbff2ecccf3d 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1444,7 +1444,7 @@ static int __init ip_auto_config(void)  		root_server_addr = addr;  	/* -	 * Use defaults whereever applicable. +	 * Use defaults wherever applicable.  	 */  	if (ic_defaults() < 0)  		return -1; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 4b5d457c2d76..89bc7e66d598 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -76,7 +76,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,  }  /* - * Unfortunatly, _b and _mask are not aligned to an int (or long int) + * Unfortunately, _b and _mask are not aligned to an int (or long int)   * Some arches dont care, unrolling the loop is a win on them.   * For other arches, we only have a 16bit alignement.   
*/ @@ -1874,7 +1874,7 @@ static int __init arp_tables_init(void)  	if (ret < 0)  		goto err1; -	/* Noone else will be downing sem now, so we won't sleep */ +	/* No one else will be downing sem now, so we won't sleep */  	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));  	if (ret < 0)  		goto err2; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index ffcea0d1678e..704915028009 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -2233,7 +2233,7 @@ static int __init ip_tables_init(void)  	if (ret < 0)  		goto err1; -	/* Noone else will be downing sem now, so we won't sleep */ +	/* No one else will be downing sem now, so we won't sleep */  	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));  	if (ret < 0)  		goto err2; diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 21bcf471b25a..9c71b2755ce3 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -521,7 +521,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)  }  EXPORT_SYMBOL(nf_nat_protocol_register); -/* Noone stores the protocol anywhere; simply delete it. */ +/* No one stores the protocol anywhere; simply delete it. */  void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)  {  	spin_lock_bh(&nf_nat_lock); @@ -532,7 +532,7 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)  }  EXPORT_SYMBOL(nf_nat_protocol_unregister); -/* Noone using conntrack by the time this called. */ +/* No one using conntrack by the time this called. */  static void nf_nat_cleanup_conntrack(struct nf_conn *ct)  {  	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2d3c72e5bbbf..bceaec42c37d 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -622,7 +622,7 @@ do_confirm:  static void raw_close(struct sock *sk, long timeout)  {  	/* -	 * Raw sockets may have direct kernel refereneces. Kill them. +	 * Raw sockets may have direct kernel references. Kill them.  	 */  	ip_ra_control(sk, 0, NULL); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 4b0c81180804..ea107515c53e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -821,7 +821,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)  }  /* - * Pertubation of rt_genid by a small quantity [1..256] + * Perturbation of rt_genid by a small quantity [1..256]   * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()   * many times (2^24) without giving recent rt_genid.   * Jenkins hash is strong enough that litle changes of rt_genid are OK. @@ -1191,7 +1191,7 @@ restart:  #endif  	/*  	 * Since lookup is lockfree, we must make sure -	 * previous writes to rt are comitted to memory +	 * previous writes to rt are committed to memory  	 * before making rt visible to other CPUS.  	 */  	rcu_assign_pointer(rt_hash_table[hash].chain, rt); diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index 656d431c99ad..72f7218b03f5 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -12,7 +12,7 @@   *     within cong_avoid.   *   o Error correcting in remote HZ, therefore remote HZ will be keeped   *     on checking and updating. - *   o Handling calculation of One-Way-Delay (OWD) within rtt_sample, sicne + *   o Handling calculation of One-Way-Delay (OWD) within rtt_sample, since   *     OWD have a similar meaning as RTT. Also correct the buggy formular.   
*   o Handle reaction for Early Congestion Indication (ECI) within   *     pkts_acked, as mentioned within pseudo code. diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index dfa5beb0c1c8..64f30eca1c67 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -73,7 +73,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)  	tcp_advance_send_head(sk, skb);  	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; -	/* Don't override Nagle indefinately with F-RTO */ +	/* Don't override Nagle indefinitely with F-RTO */  	if (tp->frto_counter == 2)  		tp->frto_counter = 3; diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index dc7f43179c9a..05c3b6f0e8e1 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -20,7 +20,7 @@  #define TCP_YEAH_DELTA        3 //log minimum fraction of cwnd to be removed on loss  #define TCP_YEAH_EPSILON      1 //log maximum fraction to be removed on early decongestion  #define TCP_YEAH_PHY          8 //lin maximum delta from base -#define TCP_YEAH_RHO         16 //lin minumum number of consecutive rtt to consider competition on loss +#define TCP_YEAH_RHO         16 //lin minimum number of consecutive rtt to consider competition on loss  #define TCP_YEAH_ZETA        50 //lin minimum number of state switchs to reset reno_count  #define TCP_SCALABLE_AI_CNT	 100U diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 588f47af5faf..f87a8eb76f3b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -189,7 +189,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,   *  @sk:          socket struct in question   *  @snum:        port number to look up   *  @saddr_comp:  AF-dependent comparison of bound local IP addresses - *  @hash2_nulladdr: AF-dependant hash value in secondary hash chains, + *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,   *                   with NULL address   */  int udp_lib_get_port(struct sock *sk, unsigned short snum, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3daaf3c7703c..1493534116df 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1084,7 +1084,7 @@ static int ipv6_get_saddr_eval(struct net *net,  	case IPV6_SADDR_RULE_PRIVACY:  	    {  		/* Rule 7: Prefer public address -		 * Note: prefer temprary address if use_tempaddr >= 2 +		 * Note: prefer temporary address if use_tempaddr >= 2  		 */  		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?  				!!(dst->prefs & IPV6_PREFER_SRC_TMP) : @@ -1968,7 +1968,7 @@ ok:  					 *  to the stored lifetime since we'll  					 *  be updating the timestamp below,  					 *  else we'll set it back to the -					 *  minumum. +					 *  minimum.  					 */  					if (prefered_lft != ifp->prefered_lft) {  						valid_lft = stored_lft; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 4b13d5d8890e..afcc7099f96d 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1113,7 +1113,7 @@ static int __init inet6_init(void)  	/*  	 *	ipngwg API draft makes clear that the correct semantics  	 *	for TCP and UDP is to consider one TCP and UDP instance -	 *	in a host availiable by both INET and INET6 APIs and +	 *	in a host available by both INET and INET6 APIs and  	 *	able to communicate via both network protocols.  	 
*/ diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 18208876aa8a..46cf7bea6769 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -779,7 +779,7 @@ slow_path:  		/* IF: it doesn't fit, use 'mtu' - the data space left */  		if (len > mtu)  			len = mtu; -		/* IF: we are not sending upto and including the packet end +		/* IF: we are not sending up to and including the packet end  		   then align the next start on an eight byte boundary */  		if (len < left)	{  			len &= ~7; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 0b2af9b85cec..5a1c6f27ffaf 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -2248,7 +2248,7 @@ static int __init ip6_tables_init(void)  	if (ret < 0)  		goto err1; -	/* Noone else will be downing sem now, so we won't sleep */ +	/* No one else will be downing sem now, so we won't sleep */  	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));  	if (ret < 0)  		goto err2; diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index 97c5b21b9674..cdd6d045e42e 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -71,7 +71,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,  	if (reasm == NULL)  		return NF_STOLEN; -	/* error occured or not fragmented */ +	/* error occurred or not fragmented */  	if (reasm == skb)  		return NF_ACCEPT; diff --git a/net/irda/irlap.c b/net/irda/irlap.c index 783c5f367d29..005b424494a0 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -165,7 +165,7 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,  	irlap_apply_default_connection_parameters(self); -	self->N3 = 3; /* # connections attemts to try before giving up */ +	self->N3 = 3; /* # connections attempts to try before giving up */  	self->state = LAP_NDM; diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index d434c8880745..bb47021c9a55 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c @@ -708,7 +708,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,  				self->frame_sent = TRUE;  			} -			/* Readjust our timer to accomodate devices +			/* Readjust our timer to accommodate devices  			 * doing faster or slower discovery than us...  			 * Jean II */  			irlap_start_query_timer(self, info->S, info->s); @@ -931,7 +931,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,  		irlap_send_rr_frame(self, CMD_FRAME);  		/* The timer is set to half the normal timer to quickly -		 * detect a failure to negociate the new connection +		 * detect a failure to negotiate the new connection  		 * parameters. IrLAP 6.11.3.2, note 3.  		 * Note that currently we don't process this failure  		 * properly, as we should do a quick disconnect. 
@@ -1052,7 +1052,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,  				return -EPROTO;  			} -			/* Substract space used by this skb */ +			/* Subtract space used by this skb */  			self->bytes_left -= skb->len;  #else	/* CONFIG_IRDA_DYNAMIC_WINDOW */  			/* Window has been adjusted for the max packet @@ -1808,7 +1808,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,  				return -EPROTO; /* Try again later */  			} -			/* Substract space used by this skb */ +			/* Subtract space used by this skb */  			self->bytes_left -= skb->len;  #else	/* CONFIG_IRDA_DYNAMIC_WINDOW */  			/* Window has been adjusted for the max packet diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 688222cbf55b..8c004161a843 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -848,7 +848,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)  	 * though IrLAP is currently sending the *last* frame of the  	 * tx-window, the driver most likely has only just started  	 * sending the *first* frame of the same tx-window. -	 * I.e. we are always at the very begining of or Tx window. +	 * I.e. we are always at the very beginning of or Tx window.  	 * Now, we are supposed to set the final timer from the end  	 * of our tx-window to let the other peer reply. So, we need  	 * to add extra time to compensate for the fact that we diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c index c1fb5db81042..9505a7d06f1a 100644 --- a/net/irda/irlmp_event.c +++ b/net/irda/irlmp_event.c @@ -498,7 +498,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,  	switch (event) {  #ifdef CONFIG_IRDA_ULTRA  	case LM_UDATA_INDICATION: -		/* This is most bizzare. Those packets are  aka unreliable +		/* This is most bizarre. Those packets are  aka unreliable  		 * connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA.  		 * Why do we pass them as Ultra ??? Jean II */  		irlmp_connless_data_indication(self, skb); diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index 0d82ff5aeff1..979ecb2435a7 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h @@ -73,7 +73,7 @@   * Infinite thanks to those brave souls for providing the infrastructure   * upon which IrNET is built.   * - * Thanks to all my collegues in HP for helping me. In particular, + * Thanks to all my colleagues in HP for helping me. In particular,   * thanks to Salil Pradhan and Bill Serra for W2k testing...   * Thanks to Luiz Magalhaes for irnetd and much testing...   * diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index 849aaf0dabb5..9715e6e5900b 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c @@ -40,7 +40,7 @@   *	o the hash function for ints is pathetic (but could be changed)   *	o locking is sometime suspicious (especially during enumeration)   *	o most users have only a few elements (== overhead) - *	o most users never use seach, so don't benefit from hashing + *	o most users never use search, so don't benefit from hashing   * Problem already fixed :   *	o not 64 bit compliant (most users do hashv = (int) self)   *	o hashbin_remove() is broken => use hashbin_remove_this() diff --git a/net/irda/irttp.c b/net/irda/irttp.c index f6054f9ccbe3..9d9af4606970 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -1193,7 +1193,7 @@ EXPORT_SYMBOL(irttp_connect_request);  /*   * Function irttp_connect_confirm (handle, qos, skb)   * - *    Sevice user confirms TSAP connection with peer. 
+ *    Service user confirms TSAP connection with peer.   *   */  static void irttp_connect_confirm(void *instance, void *sap, diff --git a/net/irda/qos.c b/net/irda/qos.c index 2b00974e5bae..1b51bcf42394 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -39,16 +39,16 @@  #include <net/irda/irlap_frame.h>  /* - * Maximum values of the baud rate we negociate with the other end. + * Maximum values of the baud rate we negotiate with the other end.   * Most often, you don't have to change that, because Linux-IrDA will   * use the maximum offered by the link layer, which usually works fine.   * In some very rare cases, you may want to limit it to lower speeds...   */  int sysctl_max_baud_rate = 16000000;  /* - * Maximum value of the lap disconnect timer we negociate with the other end. + * Maximum value of the lap disconnect timer we negotiate with the other end.   * Most often, the value below represent the best compromise, but some user - * may want to keep the LAP alive longuer or shorter in case of link failure. + * may want to keep the LAP alive longer or shorter in case of link failure.   * Remember that the threshold time (early warning) is fixed to 3s...   */  int sysctl_max_noreply_time = 12; @@ -411,7 +411,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)  	 * Fix tx data size according to user limits - Jean II  	 */  	if (qos->data_size.value > sysctl_max_tx_data_size) -		/* Allow non discrete adjustement to avoid loosing capacity */ +		/* Allow non discrete adjustement to avoid losing capacity */  		qos->data_size.value = sysctl_max_tx_data_size;  	/*  	 * Override Tx window if user request it. - Jean II diff --git a/net/irda/timer.c b/net/irda/timer.c index 0335ba0cc593..f418cb2ad49c 100644 --- a/net/irda/timer.c +++ b/net/irda/timer.c @@ -59,7 +59,7 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)  	 * slot time, plus add some extra time to properly receive the last  	 * discovery packet (which is longer due to extra discovery info),  	 * to avoid messing with for incomming connections requests and -	 * to accomodate devices that perform discovery slower than us. +	 * to accommodate devices that perform discovery slower than us.  	 * Jean II */  	timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)  		   + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 9637e45744fa..986b2a5e8769 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -250,7 +250,7 @@ static struct device *af_iucv_dev;   *	PRMDATA[0..6]	socket data (max 7 bytes);   *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])   * - * The socket data length is computed by substracting the socket data length + * The socket data length is computed by subtracting the socket data length   * value from 0xFF.   * If the socket data len is greater 7, then PRMDATA can be used for special   * notifications (see iucv_sock_shutdown); and further, diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 1ee5dab3cfae..8f156bd86be7 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -735,7 +735,7 @@ static void iucv_cleanup_queue(void)  	struct iucv_irq_list *p, *n;  	/* -	 * When a path is severed, the pathid can be reused immediatly +	 * When a path is severed, the pathid can be reused immediately  	 * on a iucv connect or a connection pending interrupt. Remove  	 * all entries from the task queue that refer to a stale pathid  	 * (iucv_path_table[ix] == NULL). 
Only then do the iucv connect @@ -807,7 +807,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)  	spin_lock_bh(&iucv_table_lock);  	/* Remove handler from the iucv_handler_list. */  	list_del_init(&handler->list); -	/* Sever all pathids still refering to the handler. */ +	/* Sever all pathids still referring to the handler. */  	list_for_each_entry_safe(p, n, &handler->paths, list) {  		iucv_sever_pathid(p->pathid, NULL);  		iucv_path_table[p->pathid] = NULL; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a40401701424..c18396c248d7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -97,7 +97,7 @@ struct ieee80211_bss {  	size_t supp_rates_len;  	/* -	 * During assocation, we save an ERP value from a probe response so +	 * During association, we save an ERP value from a probe response so  	 * that we can feed ERP info to the driver when handling the  	 * association completes. these fields probably won't be up-to-date  	 * otherwise, you probably don't want to use them. diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 8d65b47d9837..336ca9d0c5c4 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -628,7 +628,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,   *   * @mpath: mesh path whose queue has to be freed   * - * Locking: the function must me called withing a rcu_read_lock region + * Locking: the function must me called within a rcu_read_lock region   */  void mesh_path_flush_pending(struct mesh_path *mpath)  { diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 8212a8bebf06..78e67d22dc1f 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -259,7 +259,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)  		}  	} -	/* try to sample up to half of the availble rates during each interval */ +	/* try to sample up to half of the available rates during each interval */  	mi->sample_count *= 4;  	cur_prob = 0; diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h index 6510f8ee738e..19111c7bf454 100644 --- a/net/mac80211/rc80211_pid.h +++ b/net/mac80211/rc80211_pid.h @@ -77,7 +77,7 @@ union rc_pid_event_data {  };  struct rc_pid_event { -	/* The time when the event occured */ +	/* The time when the event occurred */  	unsigned long timestamp;  	/* Event ID number */ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5c1930ba8ebe..c50b68423c7b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -381,7 +381,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)   * specs were sane enough this time around to require padding each A-MSDU   * subframe to a length that is a multiple of four.   * - * Padding like Atheros hardware adds which is inbetween the 802.11 header and + * Padding like Atheros hardware adds which is between the 802.11 header and   * the payload is not supported, the driver is required to move the 802.11   * header to be directly in front of the payload in that case.   */ diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index d0311a322ddd..13e8c30adf01 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -47,9 +47,9 @@   * Station entries are added by mac80211 when you establish a link with a   * peer. This means different things for the different type of interfaces   * we support. 
For a regular station this mean we add the AP sta when we - * receive an assocation response from the AP. For IBSS this occurs when + * receive an association response from the AP. For IBSS this occurs when   * get to know about a peer on the same IBSS. For WDS we add the sta for - * the peer imediately upon device open. When using AP mode we add stations + * the peer immediately upon device open. When using AP mode we add stations   * for each respective station upon request from userspace through nl80211.   *   * In order to remove a STA info structure, various sta_info_destroy_*() diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 57681149e37f..b2f95966c7f4 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -173,7 +173,7 @@ struct sta_ampdu_mlme {  /**   * enum plink_state - state of a mesh peer link finite state machine   * - * @PLINK_LISTEN: initial state, considered the implicit state of non existant + * @PLINK_LISTEN: initial state, considered the implicit state of non existent   * 	mesh peer links   * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer   * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index d6b48230a540..253326e8d990 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -893,7 +893,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,  	to = ip_set_list[to_id];  	/* Features must not change. -	 * Not an artifical restriction anymore, as we must prevent +	 * Not an artificial restriction anymore, as we must prevent  	 * possible loops created by swapping in setlist type of sets. */  	if (!(from->type->features == to->type->features &&  	      from->type->family == to->type->family)) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index f289306cbf12..c97bd45975be 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -595,7 +595,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)  			atomic_inc(&dest->inactconns);  	} else {  		/* It is a persistent connection/template, so increase -		   the peristent connection counter */ +		   the persistent connection counter */  		atomic_inc(&dest->persistconns);  	} @@ -657,7 +657,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)  		}  	} else {  		/* It is a persistent connection/template, so decrease -		   the peristent connection counter */ +		   the persistent connection counter */  		atomic_dec(&dest->persistconns);  	} diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index f276df9896b3..87e40ea77a95 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -131,7 +131,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)  {  	list_del(&en->list);  	/* -	 * We don't kfree dest because it is refered either by its service +	 * We don't kfree dest because it is referred either by its service  	 * or the trash dest list.  	 
*/  	atomic_dec(&en->dest->refcnt); diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index cb1c9913d38b..90f618ab6dda 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -152,7 +152,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)  	write_lock(&set->lock);  	list_for_each_entry_safe(e, ep, &set->list, list) {  		/* -		 * We don't kfree dest because it is refered either +		 * We don't kfree dest because it is referred either  		 * by its service or by the trash dest list.  		 */  		atomic_dec(&e->dest->refcnt); diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index b027ccc49f43..d12ed53ec95f 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -566,7 +566,7 @@ static struct ipvs_sctp_nextstate  	 * SHUTDOWN sent from the client, waitinf for SHUT ACK from the server  	 */  	/* -	 * We recieved the data chuck, keep the state unchanged. I assume +	 * We received the data chuck, keep the state unchanged. I assume  	 * that still data chuncks  can be received by both the peers in  	 * SHUDOWN state  	 */ @@ -633,7 +633,7 @@ static struct ipvs_sctp_nextstate  	 * SHUTDOWN sent from the server, waitinf for SHUTDOWN ACK from client  	 */  	/* -	 * We recieved the data chuck, keep the state unchanged. I assume +	 * We received the data chuck, keep the state unchanged. I assume  	 * that still data chuncks  can be received by both the peers in  	 * SHUDOWN state  	 */ @@ -701,7 +701,7 @@ static struct ipvs_sctp_nextstate  	 * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server  	 */  	/* -	 * We recieved the data chuck, keep the state unchanged. I assume +	 * We received the data chuck, keep the state unchanged. I assume  	 * that still data chuncks  can be received by both the peers in  	 * SHUDOWN state  	 */ @@ -771,7 +771,7 @@ static struct ipvs_sctp_nextstate  	 * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client  	 */  	/* -	 * We recieved the data chuck, keep the state unchanged. I assume +	 * We received the data chuck, keep the state unchanged. I assume  	 * that still data chuncks  can be received by both the peers in  	 * SHUDOWN state  	 */ diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 941286ca911d..2e1c11f78419 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -453,7 +453,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)  	   REJECT will give spurious warnings here. */  	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ -	/* No external references means noone else could have +	/* No external references means no one else could have  	   confirmed us. 
*/  	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));  	pr_debug("Confirming conntrack %p\n", ct); @@ -901,7 +901,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,  	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),  				   &dataoff, &protonum);  	if (ret <= 0) { -		pr_debug("not prepared to track yet or error occured\n"); +		pr_debug("not prepared to track yet or error occurred\n");  		NF_CT_STAT_INC_ATOMIC(net, error);  		NF_CT_STAT_INC_ATOMIC(net, invalid);  		ret = -ret; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 9ae57c57c50e..2e664a69d7db 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -98,7 +98,7 @@ static const char * const dccp_state_names[] = {  #define sIV	CT_DCCP_INVALID  /* - * DCCP state transistion table + * DCCP state transition table   *   * The assumption is the same as for TCP tracking:   * diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 6f4ee70f460b..6772b1154654 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -107,9 +107,9 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {  /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},  /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},  /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, -/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/ +/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/  /* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ -/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */ +/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */  /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}  	},  	{ @@ -121,7 +121,7 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {  /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},  /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},  /* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, -/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */ +/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */  /* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},  /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}  	} diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index bcf47eb518ef..237cc1981b89 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -707,7 +707,7 @@ static const char *ct_sdp_header_search(const char *dptr, const char *limit,  }  /* Locate a SDP header (optionally a substring within the header value), - * optionally stopping at the first occurence of the term header, parse + * optionally stopping at the first occurrence of the term header, parse   * it and return the offset and length of the data we're interested in.   
*/  int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5ab22e2bbd7d..5b466cd1272f 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -134,7 +134,7 @@ static int __nf_queue(struct sk_buff *skb,  	const struct nf_afinfo *afinfo;  	const struct nf_queue_handler *qh; -	/* QUEUE == DROP if noone is waiting, to be safe. */ +	/* QUEUE == DROP if no one is waiting, to be safe. */  	rcu_read_lock();  	qh = rcu_dereference(queue_handler[pf]); diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index d37b7f80fa37..de0d8e4cbfb6 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -109,7 +109,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)   *   * Description:   * This is the hashing function for the domain hash table, it returns the - * correct bucket number for the domain.  The caller is responsibile for + * correct bucket number for the domain.  The caller is responsible for   * ensuring that the hash table is protected with either a RCU read lock or the   * hash table lock.   * @@ -134,7 +134,7 @@ static u32 netlbl_domhsh_hash(const char *key)   *   * Description:   * Searches the domain hash table and returns a pointer to the hash table - * entry if found, otherwise NULL is returned.  The caller is responsibile for + * entry if found, otherwise NULL is returned.  The caller is responsible for   * ensuring that the hash table is protected with either a RCU read lock or the   * hash table lock.   * @@ -165,7 +165,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)   * Searches the domain hash table and returns a pointer to the hash table   * entry if an exact match is found, if an exact match is not present in the   * hash table then the default entry is returned if valid otherwise NULL is - * returned.  The caller is responsibile ensuring that the hash table is + * returned.  The caller is responsible ensuring that the hash table is   * protected with either a RCU read lock or the hash table lock.   *   */ @@ -193,7 +193,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)   *   * Description:   * Generate an audit record for adding a new NetLabel/LSM mapping entry with - * the given information.  Caller is responsibile for holding the necessary + * the given information.  Caller is responsible for holding the necessary   * locks.   *   */ @@ -605,7 +605,7 @@ int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info)   *   * Description:   * Look through the domain hash table searching for an entry to match @domain, - * return a pointer to a copy of the entry or NULL.  The caller is responsibile + * return a pointer to a copy of the entry or NULL.  The caller is responsible   * for ensuring that rcu_read_[un]lock() is called.   *   */ diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 998e85e895d0..4f251b19fbcc 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -259,7 +259,7 @@ add_failure:   *   * Description:   * This function is a helper function used by the LISTALL and LISTDEF command - * handlers.  The caller is responsibile for ensuring that the RCU read lock + * handlers.  The caller is responsible for ensuring that the RCU read lock   * is held.  Returns zero on success, negative values on failure.   
*   */ diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index c47a511f203d..7c4dce8fa5e6 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -355,7 +355,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)   *   * Conceptually, we have two counters:   *  -	send credits: this tells us how many WRs we're allowed - *	to submit without overruning the reciever's queue. For + *	to submit without overruning the receiver's queue. For   *	each SEND WR we post, we decrement this by one.   *   *  -	posted credits: this tells us how many WRs we recently diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 712cf2d1f28e..3a60a15d1b4a 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c @@ -181,7 +181,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,  	unsigned int send_size, recv_size;  	int ret; -	/* The offset of 1 is to accomodate the additional ACK WR. */ +	/* The offset of 1 is to accommodate the additional ACK WR. */  	send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);  	recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);  	rds_iw_ring_resize(send_ring, send_size - 1); diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 59509e9a9e72..6deaa77495e3 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c @@ -122,7 +122,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd  #else  			/* FIXME - needs to compare the local and remote  			 * ipaddr/port tuple, but the ipaddr is the only -			 * available infomation in the rds_sock (as the rest are +			 * available information in the rds_sock (as the rest are  			 * zero'ed.  It doesn't appear to be properly populated  			 * during connection setup...  			 */ diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 6280ea020d4e..545d8ee3efb1 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c @@ -307,7 +307,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)   *   * Conceptually, we have two counters:   *  -	send credits: this tells us how many WRs we're allowed - *	to submit without overruning the reciever's queue. For + *	to submit without overruning the receiver's queue. For   *	each SEND WR we post, we decrement this by one.   *   *  -	posted credits: this tells us how many WRs we recently diff --git a/net/rds/send.c b/net/rds/send.c index 35b9c2e9caf1..d58ae5f9339e 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -116,7 +116,7 @@ static void release_in_xmit(struct rds_connection *conn)  }  /* - * We're making the concious trade-off here to only send one message + * We're making the conscious trade-off here to only send one message   * down the connection at a time.   *   Pro:   *      - tx queueing is a simple fifo list diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 08dcd2f29cdc..479cae57d187 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -587,7 +587,7 @@ static int rose_clear_routes(void)  /*   *	Check that the device given is a valid AX.25 interface that is "up". 
- * 	called whith RTNL + * 	called with RTNL   */  static struct net_device *rose_ax25_dev_find(char *devname)  { diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 15873e14cb54..14b42f4ad791 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -999,7 +999,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)  	switch (n->nlmsg_type) {  	case RTM_NEWACTION:  		/* we are going to assume all other flags -		 * imply create only if it doesnt exist +		 * imply create only if it doesn't exist  		 * Note that CREATE | EXCL implies that  		 * but since we want avoid ambiguity (eg when flags  		 * is zero) then just set this diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 50c7c06c019d..7affe9a92757 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -161,7 +161,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,  			}  			if (offset > 0 && offset > skb->len) {  				pr_info("tc filter pedit" -					" offset %d cant exceed pkt length %d\n", +					" offset %d can't exceed pkt length %d\n",  				       offset, skb->len);  				goto bad;  			} diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index a4de67eca824..49130e8abff0 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -47,7 +47,7 @@   * 	on the meta type. Obviously, the length of the data must also   * 	be provided for non-numeric types.   * - * 	Additionaly, type dependant modifiers such as shift operators + * 	Additionally, type dependent modifiers such as shift operators   * 	or mask may be applied to extend the functionaliy. As of now,   * 	the variable length type supports shifting the byte string to   * 	the right, eating up any number of octets and thus supporting diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index e1429a85091f..29b942ce9e82 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -183,7 +183,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)   * filters in qdisc and in inner nodes (if higher filter points to the inner   * node). If we end up with classid MAJOR:0 we enqueue the skb into special   * internal fifo (direct). These packets then go directly thru. If we still - * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull + * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful   * then finish and return direct queue.   */  #define HTB_DIRECT ((struct htb_class *)-1L) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index edbbf7ad6623..69c35f6cd13f 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -160,7 +160,7 @@ static bool loss_4state(struct netem_sched_data *q)  	u32 rnd = net_random();  	/* -	 * Makes a comparision between rnd and the transition +	 * Makes a comparison between rnd and the transition  	 * probabilities outgoing from the current state, then decides the  	 * next state and if the next packet has to be transmitted or lost.  	 * The four states correspond to: @@ -212,9 +212,9 @@ static bool loss_4state(struct netem_sched_data *q)   * Generates losses according to the Gilbert-Elliot loss model or   * its special cases  (Gilbert or Simple Gilbert)   * - * Makes a comparision between random number and the transition + * Makes a comparison between random number and the transition   * probabilities outgoing from the current state, then decides the - * next state. A second random number is extracted and the comparision + * next state. 
A second random number is extracted and the comparison   * with the loss probability of the current state decides if the next   * packet will be transmitted or lost.   */ diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 6b04287913cd..0698cad61763 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1593,7 +1593,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)  	struct sctp_chunk *ack;  	struct sctp_chunk *tmp; -	/* We can remove all the entries from the queue upto +	/* We can remove all the entries from the queue up to  	 * the "Peer-Sequence-Number".  	 */  	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, diff --git a/net/sctp/auth.c b/net/sctp/auth.c index ddbbf7c81fa1..865e68fef21c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -113,7 +113,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)  	return new;  } -/* Free the shared key stucture */ +/* Free the shared key structure */  static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)  {  	BUG_ON(!list_empty(&sh_key->key_list)); @@ -122,7 +122,7 @@ static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)  	kfree(sh_key);  } -/* Destory the entire key list.  This is done during the +/* Destroy the entire key list.  This is done during the   * associon and endpoint free process.   */  void sctp_auth_destroy_keys(struct list_head *keys) @@ -324,7 +324,7 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(  	if (!peer_key_vector || !local_key_vector)  		goto out; -	/* Figure out the order in wich the key_vectors will be +	/* Figure out the order in which the key_vectors will be  	 * added to the endpoint shared key.  	 * SCTP-AUTH, Section 6.1:  	 *   This is performed by selecting the numerically smaller key diff --git a/net/sctp/input.c b/net/sctp/input.c index 826661be73e7..5436c6921167 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -1034,7 +1034,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(  *    association.  *  * This means that any chunks that can help us identify the association need -* to be looked at to find this assocation. +* to be looked at to find this association.  */  static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,  				      const union sctp_addr *laddr, diff --git a/net/sctp/output.c b/net/sctp/output.c index 60600d337a3a..b4f3cf06d8da 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -510,7 +510,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)  		sh->checksum = sctp_end_cksum(crc32);  	} else {  		if (dst->dev->features & NETIF_F_SCTP_CSUM) { -			/* no need to seed psuedo checksum for SCTP */ +			/* no need to seed pseudo checksum for SCTP */  			nskb->ip_summed = CHECKSUM_PARTIAL;  			nskb->csum_start = (skb_transport_header(nskb) -  			                    nskb->head); diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 26dc005113a0..bf92a5b68f8b 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -177,13 +177,13 @@ static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)   * 3) If the missing report count for TSN t is to be   * incremented according to [RFC2960] and   * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, - * then the sender MUST futher execute steps 3.1 and + * then the sender MUST further execute steps 3.1 and   * 3.2 to determine if the missing report count for   * TSN t SHOULD NOT be incremented.   
*   * 3.3) If 3.1 and 3.2 do not dictate that the missing   * report count for t should not be incremented, then - * the sender SOULD increment missing report count for + * the sender SHOULD increment missing report count for   * t (according to [RFC2960] and [SCTP_STEWART_2002]).   */  static inline int sctp_cacc_skip(struct sctp_transport *primary, @@ -843,7 +843,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)  		case SCTP_CID_ECN_CWR:  		case SCTP_CID_ASCONF_ACK:  			one_packet = 1; -			/* Fall throught */ +			/* Fall through */  		case SCTP_CID_SACK:  		case SCTP_CID_HEARTBEAT: diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b21b218d564f..5f86ee4b54c1 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -482,7 +482,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,  	 * If the timer was a heartbeat, we only increment error counts  	 * when we already have an outstanding HEARTBEAT that has not  	 * been acknowledged. -	 * Additionaly, some tranport states inhibit error increments. +	 * Additionally, some tranport states inhibit error increments.  	 */  	if (!is_hb) {  		asoc->overall_error_count++; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 4b4eb7c96bbd..76792083c379 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -551,7 +551,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,  		 *  		 * This means that if we only want to abort associations  		 * in an authenticated way (i.e AUTH+ABORT), then we -		 * can't destroy this association just becuase the packet +		 * can't destroy this association just because the packet  		 * was malformed.  		 */  		if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) @@ -1546,7 +1546,7 @@ cleanup:  }  /* - * Handle simultanous INIT. + * Handle simultaneous INIT.   * This means we started an INIT and then we got an INIT request from   * our peer.   * @@ -2079,7 +2079,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(  	 * RFC 2960, Section 3.3.7  	 *    If an endpoint receives an ABORT with a format error or for an  	 *    association that doesn't exist, it MUST silently discard it. -	 * Becasue the length is "invalid", we can't really discard just +	 * Because the length is "invalid", we can't really discard just  	 * as we do not know its true length.  So, to be safe, discard the  	 * packet.  	 */ @@ -2120,7 +2120,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,  	 * RFC 2960, Section 3.3.7  	 *    If an endpoint receives an ABORT with a format error or for an  	 *    association that doesn't exist, it MUST silently discard it. -	 * Becasue the length is "invalid", we can't really discard just +	 * Because the length is "invalid", we can't really discard just  	 * as we do not know its true length.  So, to be safe, discard the  	 * packet.  	 */ @@ -2381,7 +2381,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,  	 * RFC 2960, Section 3.3.7  	 *    If an endpoint receives an ABORT with a format error or for an  	 *    association that doesn't exist, it MUST silently discard it. -	 * Becasue the length is "invalid", we can't really discard just +	 * Because the length is "invalid", we can't really discard just  	 * as we do not know its true length.  So, to be safe, discard the  	 * packet.  	 
*/ @@ -2448,7 +2448,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,  	 * RFC 2960, Section 3.3.7  	 *    If an endpoint receives an ABORT with a format error or for an  	 *    association that doesn't exist, it MUST silently discard it. -	 * Becasue the length is "invalid", we can't really discard just +	 * Because the length is "invalid", we can't really discard just  	 * as we do not know its true length.  So, to be safe, discard the  	 * packet.  	 */ @@ -3855,7 +3855,7 @@ gen_shutdown:  }  /* - * SCTP-AUTH Section 6.3 Receving authenticated chukns + * SCTP-AUTH Section 6.3 Receiving authenticated chukns   *   *    The receiver MUST use the HMAC algorithm indicated in the HMAC   *    Identifier field.  If this algorithm was not specified by the @@ -4231,7 +4231,7 @@ static sctp_disposition_t sctp_sf_abort_violation(  	 *  	 * This means that if we only want to abort associations  	 * in an authenticated way (i.e AUTH+ABORT), then we -	 * can't destroy this association just becuase the packet +	 * can't destroy this association just because the packet  	 * was malformed.  	 */  	if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) @@ -4402,9 +4402,9 @@ static sctp_disposition_t sctp_sf_violation_ctsn(  }  /* Handle protocol violation of an invalid chunk bundling.  For example, - * when we have an association and we recieve bundled INIT-ACK, or + * when we have an association and we receive bundled INIT-ACK, or   * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle" - * statement from the specs.  Additinally, there might be an attacker + * statement from the specs.  Additionally, there might be an attacker   * on the path and we may not want to continue this communication.   */  static sctp_disposition_t sctp_sf_violation_chunk( diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3951a10605bc..deb82e35a107 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1193,7 +1193,7 @@ out_free:   * an endpoint that is multi-homed.  Much like sctp_bindx() this call   * allows a caller to specify multiple addresses at which a peer can be   * reached.  The way the SCTP stack uses the list of addresses to set up - * the association is implementation dependant.  This function only + * the association is implementation dependent.  This function only   * specifies that the stack will try to make use of all the addresses in   * the list when needed.   * diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index aa72e89c3ee1..dff27d5e22fd 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -554,7 +554,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(  	memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));  	/* Per TSVWG discussion with Randy. Allow the application to -	 * ressemble a fragmented message. +	 * resemble a fragmented message.  	 */  	ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 17678189d054..f2d1de7f2ffb 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -240,7 +240,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)  		} else {  			/*  			 * If fragment interleave is enabled, we -			 * can queue this to the recieve queue instead +			 * can queue this to the receive queue instead  			 * of the lobby.  			 
*/  			if (sctp_sk(sk)->frag_interleave) diff --git a/net/socket.c b/net/socket.c index 5212447c86e7..310d16b1b3c9 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2986,7 +2986,7 @@ out:  /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE   * for some operations; this forces use of the newer bridge-utils that - * use compatiable ioctls + * use compatible ioctls   */  static int old_bridge_ioctl(compat_ulong_t __user *argp)  { diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index bcdae78fdfc6..8d0f7d3c71c8 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1101,7 +1101,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)  	/* credential is:  	 *   version(==1), proc(0,1,2,3), seq, service (1,2,3), handle -	 * at least 5 u32s, and is preceeded by length, so that makes 6. +	 * at least 5 u32s, and is preceded by length, so that makes 6.  	 */  	if (argv->iov_len < 5 * 4) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1e336a06d3e6..bf005d3c65ef 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -504,7 +504,7 @@ static int xs_nospace(struct rpc_task *task)   *   EAGAIN:	The socket was blocked, please call again later to   *		complete the request   * ENOTCONN:	Caller needs to invoke connect logic then call again - *    other:	Some other error occured, the request was not sent + *    other:	Some other error occurred, the request was not sent   */  static int xs_udp_send_request(struct rpc_task *task)  { @@ -590,7 +590,7 @@ static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)   *   EAGAIN:	The socket was blocked, please call again later to   *		complete the request   * ENOTCONN:	Caller needs to invoke connect logic then call again - *    other:	Some other error occured, the request was not sent + *    other:	Some other error occurred, the request was not sent   *   * XXX: In the case of soft timeouts, should we eventually give up   *	if sendmsg is not able to make progress? diff --git a/net/tipc/link.c b/net/tipc/link.c index 43639ff1cbec..ebf338f7b14e 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2471,7 +2471,7 @@ exit:   * A pending message being re-assembled must store certain values   * to handle subsequent fragments correctly. The following functions   * help storing these values in unused, available fields in the - * pending message. This makes dynamic memory allocation unecessary. + * pending message. This makes dynamic memory allocation unnecessary.   */  static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index c9fa6dfcf287..80025a1b3bfd 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -160,7 +160,7 @@ void tipc_named_withdraw(struct publication *publ)  	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);  	if (!buf) { -		warn("Withdrawl distribution failure\n"); +		warn("Withdrawal distribution failure\n");  		return;  	} diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1663e1a2efdd..3a43a8304768 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -207,7 +207,7 @@ static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)  		/*  		 * This may look like an off by one error but it is a bit more  		 * subtle. 108 is the longest valid AF_UNIX path for a binding. -		 * sun_path[108] doesnt as such exist.  However in kernel space +		 * sun_path[108] doesn't as such exist.  
However in kernel space  		 * we are guaranteed that it is a valid memory location in our  		 * kernel address buffer.  		 */ diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c index 11f25c7a7a05..f346395314ba 100644 --- a/net/wanrouter/wanproc.c +++ b/net/wanrouter/wanproc.c @@ -51,7 +51,7 @@  /*   *	Structures for interfacing with the /proc filesystem. - *	Router creates its own directory /proc/net/router with the folowing + *	Router creates its own directory /proc/net/router with the following   *	entries:   *	config		device configuration   *	status		global device statistics diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 3332d5bce317..ab801a1097b2 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -809,7 +809,7 @@ static void handle_channel(struct wiphy *wiphy,  	if (r) {  		/*  		 * We will disable all channels that do not match our -		 * recieved regulatory rule unless the hint is coming +		 * received regulatory rule unless the hint is coming  		 * from a Country IE and the Country IE had no information  		 * about a band. The IEEE 802.11 spec allows for an AP  		 * to send only a subset of the regulatory rules allowed, @@ -838,7 +838,7 @@ static void handle_channel(struct wiphy *wiphy,  	    request_wiphy && request_wiphy == wiphy &&  	    request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {  		/* -		 * This gaurantees the driver's requested regulatory domain +		 * This guarantees the driver's requested regulatory domain  		 * will always be used as a base for further regulatory  		 * settings  		 */ diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 406207515b5e..f77e4e75f914 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -31,7 +31,7 @@   * x25_parse_facilities - Parse facilities from skb into the facilities structs   *   * @skb: sk_buff to parse - * @facilities: Regular facilites, updated as facilities are found + * @facilities: Regular facilities, updated as facilities are found   * @dte_facs: ITU DTE facilities, updated as DTE facilities are found   * @vc_fac_mask: mask is updated with all facilities found   * diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c index 25a810793968..c541b622ae16 100644 --- a/net/x25/x25_forward.c +++ b/net/x25/x25_forward.c @@ -31,7 +31,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,  		goto out_no_route;  	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) { -		/* This shouldnt happen, if it occurs somehow +		/* This shouldn't happen, if it occurs somehow  		 * do something sensible  		 */  		goto out_put_route; @@ -45,7 +45,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,  	}  	/* Remote end sending a call request on an already -	 * established LCI? It shouldnt happen, just in case.. +	 * established LCI? It shouldn't happen, just in case..  	 */  	read_lock_bh(&x25_forward_list_lock);  	list_for_each(entry, &x25_forward_list) { diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3d15d3e1b2c4..5d1d60d3ca83 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -894,7 +894,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,  	u32 *f;  	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); -	if (nlh == NULL) /* shouldnt really happen ... */ +	if (nlh == NULL) /* shouldn't really happen ... 
*/  		return -EMSGSIZE;  	f = nlmsg_data(nlh); @@ -954,7 +954,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,  	u32 *f;  	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); -	if (nlh == NULL) /* shouldnt really happen ... */ +	if (nlh == NULL) /* shouldn't really happen ... */  		return -EMSGSIZE;  	f = nlmsg_data(nlh); @@ -1361,7 +1361,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,  	if (!xp)  		return err; -	/* shouldnt excl be based on nlh flags?? +	/* shouldn't excl be based on nlh flags??  	 * Aha! this is anti-netlink really i.e  more pfkey derived  	 * in netlink excl is a flag and you wouldnt need  	 * a type XFRM_MSG_UPDPOLICY - JHS */ | 
